From 07ccfde198c978b8c86b7091773e3238bfcdf454 Mon Sep 17 00:00:00 2001 From: jakevin <30525741+jackwener@users.noreply.github.com> Date: Wed, 3 Nov 2021 23:16:03 +0800 Subject: [PATCH 01/53] Specify the floating point accuracy of the round() function (#3178) * round modification * add test and fix problem * fix * add test * switch to if * add test Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- src/common/function/FunctionManager.cpp | 18 +++++--- .../function/test/FunctionManagerTest.cpp | 18 ++++++++ .../features/expression/FunctionCall.feature | 44 +++++++++++++++++++ 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/src/common/function/FunctionManager.cpp b/src/common/function/FunctionManager.cpp index 686891529b2..2a38b6736f3 100644 --- a/src/common/function/FunctionManager.cpp +++ b/src/common/function/FunctionManager.cpp @@ -55,7 +55,9 @@ std::unordered_map> FunctionManager::typ TypeSignature({Value::Type::FLOAT}, Value::Type::FLOAT)}}, {"round", {TypeSignature({Value::Type::INT}, Value::Type::FLOAT), - TypeSignature({Value::Type::FLOAT}, Value::Type::FLOAT)}}, + TypeSignature({Value::Type::INT, Value::Type::INT}, Value::Type::FLOAT), + TypeSignature({Value::Type::FLOAT}, Value::Type::FLOAT), + TypeSignature({Value::Type::FLOAT, Value::Type::INT}, Value::Type::FLOAT)}}, {"sqrt", {TypeSignature({Value::Type::INT}, Value::Type::FLOAT), TypeSignature({Value::Type::FLOAT}, Value::Type::FLOAT)}}, @@ -539,17 +541,23 @@ FunctionManager::FunctionManager() { // to nearest integral (as a floating-point value) auto &attr = functions_["round"]; attr.minArity_ = 1; - attr.maxArity_ = 1; + attr.maxArity_ = 2; attr.isPure_ = true; attr.body_ = [](const auto &args) -> Value { switch (args[0].get().type()) { case Value::Type::NULLVALUE: { return Value::kNullValue; } - case Value::Type::INT: { - return std::round(args[0].get().getInt()); - } + case Value::Type::INT: case Value::Type::FLOAT: { + if (args.size() == 2) { + if 
(args[1].get().type() == Value::Type::INT) { + auto decimal = args[1].get().getInt(); + return std::round(args[0].get().getFloat() * pow(10, decimal)) / pow(10, decimal); + } else { + return Value::kNullBadType; + } + } return std::round(args[0].get().getFloat()); } default: { diff --git a/src/common/function/test/FunctionManagerTest.cpp b/src/common/function/test/FunctionManagerTest.cpp index e0383f1106d..9305c788e59 100644 --- a/src/common/function/test/FunctionManagerTest.cpp +++ b/src/common/function/test/FunctionManagerTest.cpp @@ -103,6 +103,9 @@ std::unordered_map> FunctionManagerTest::args_ = {"one", {-1.2}}, {"two", {2, 4}}, {"pow", {2, 3}}, + {"round1", {11111.11111, 2}}, + {"round2", {11111.11111, -1}}, + {"round3", {11111.11111, -5}}, {"radians", {180}}, {"range1", {1, 5}}, {"range2", {1, 5, 2}}, @@ -268,6 +271,11 @@ TEST_F(FunctionManagerTest, functionCall) { TEST_FUNCTION(log, args_["int"], std::log(4)); TEST_FUNCTION(log2, args_["int"], 2.0); } + { + TEST_FUNCTION(round, args_["round1"], 11111.11); + TEST_FUNCTION(round, args_["round2"], 11110.0); + TEST_FUNCTION(round, args_["round3"], 0.0); + } { TEST_FUNCTION(range, args_["range1"], Value(List({1, 2, 3, 4, 5}))); TEST_FUNCTION(range, args_["range2"], Value(List({1, 3, 5}))); @@ -916,11 +924,21 @@ TEST_F(FunctionManagerTest, returnType) { ASSERT_TRUE(result.ok()); EXPECT_EQ(result.value(), Value::Type::FLOAT); } + { + auto result = FunctionManager::getReturnType("round", {Value::Type::INT, Value::Type::INT}); + ASSERT_TRUE(result.ok()); + EXPECT_EQ(result.value(), Value::Type::FLOAT); + } { auto result = FunctionManager::getReturnType("round", {Value::Type::FLOAT}); ASSERT_TRUE(result.ok()); EXPECT_EQ(result.value(), Value::Type::FLOAT); } + { + auto result = FunctionManager::getReturnType("round", {Value::Type::FLOAT, Value::Type::INT}); + ASSERT_TRUE(result.ok()); + EXPECT_EQ(result.value(), Value::Type::FLOAT); + } { auto result = FunctionManager::getReturnType("cbrt", {Value::Type::INT}); 
ASSERT_TRUE(result.ok()); diff --git a/tests/tck/features/expression/FunctionCall.feature b/tests/tck/features/expression/FunctionCall.feature index cd71235419e..5c5a6a00a46 100644 --- a/tests/tck/features/expression/FunctionCall.feature +++ b/tests/tck/features/expression/FunctionCall.feature @@ -121,6 +121,50 @@ Feature: Function Call Expression | result | | NULL | + Scenario: round + When executing query: + """ + YIELD round(3.1415926, 9) as result + """ + Then the result should be, in any order: + | result | + | 3.1415926 | + When executing query: + """ + YIELD round(3.1415926, 2) as result + """ + Then the result should be, in any order: + | result | + | 3.14 | + When executing query: + """ + YIELD round(3.1415926, 3) as result + """ + Then the result should be, in any order: + | result | + | 3.142 | + When executing query: + """ + YIELD round(3.14159265359, 0) as result + """ + Then the result should be, in any order: + | result | + | 3.0 | + When executing query: + """ + YIELD round(35543.14159265359, -3) as result + """ + Then the result should be, in any order: + | result | + | 36000.0 | + When executing query: + """ + YIELD round(35543.14159265359, -5) as result + """ + Then the result should be, in any order: + | result | + | 0.0 | + Scenario: error check When executing query: """ From 22a5fe912d74398b209be512d429d9be87dba5b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BD=95=E5=BB=B6=E9=BE=99?= Date: Fri, 5 Nov 2021 10:17:46 +0800 Subject: [PATCH 02/53] Add license check (#3268) * [CI] Add license check * [CI] Add license check * [CI] Add license check * [CI] Add license check --- .github/workflows/pull_request.yml | 2 ++ .github/workflows/release.yml | 4 +++ .licenserc.yaml | 46 ++++++++++++++++++++++++++ scripts/meta-transfer-tools.sh | 5 +++ scripts/utils.sh | 5 +++ src/common/datatypes/EdgeOps-inl.h | 7 ++-- src/common/datatypes/HostAddrOps-inl.h | 7 ++-- src/common/datatypes/PathOps-inl.h | 7 ++-- src/common/datatypes/ValueOps-inl.h | 7 ++-- 
src/common/geo/GeoIndex.cpp | 7 ++-- 10 files changed, 87 insertions(+), 10 deletions(-) create mode 100644 .licenserc.yaml diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 7f8bda75acb..412c022ec4b 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -20,6 +20,8 @@ jobs: - uses: actions/checkout@v2 with: fetch-depth: 2 + - name: Check License Header + uses: apache/skywalking-eyes@main - name: Cpplint run: | ln -snf $PWD/.linters/cpp/hooks/pre-commit.sh $PWD/.linters/cpp/pre-commit.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 31cd8c41921..823b5d92f2d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,6 +33,8 @@ jobs: with: run: sh -c "find . -mindepth 1 -delete" - uses: actions/checkout@v2 + - name: Check License Header + uses: apache/skywalking-eyes@main - uses: ./.github/actions/tagname-action id: tag - name: package @@ -74,6 +76,8 @@ jobs: with: run: sh -c "find . -mindepth 1 -delete" - uses: actions/checkout@v2 + - name: Check License Header + uses: apache/skywalking-eyes@main - uses: ./.github/actions/tagname-action id: tagname - id: docker diff --git a/.licenserc.yaml b/.licenserc.yaml new file mode 100644 index 00000000000..d1556265c78 --- /dev/null +++ b/.licenserc.yaml @@ -0,0 +1,46 @@ +# +# Copyright (c) 2020 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +header: + license: + spdx-id: Apache-2.0 + copyright-owner: vesoft inc + content: | + + Copyright (c) 2020 vesoft inc. All rights reserved. + + This source code is licensed under Apache 2.0 License. + + pattern: | + + Copyright (c) \d{4} vesoft inc. All rights reserved. + + This source code is licensed under Apache 2.0 License. 
+ + paths-ignore: + - '.licenserc.yaml' + - '.clang-tidy' + - '.dockerignore' + - '**/.gitignore' + - 'cmake' + - 'conf' + - 'docker' + - 'package' + - 'resources/*.csv' + - 'resources/*.json' + - 'scripts/*.service' + - 'src/**/*.thrift' + - 'src/**/*.lex' + - 'src/**/*.yy' + - 'src/**/*.dict' + - 'src/**/*.in' + - 'tests' + - '.github' + - '.linters' + - '**/*.md' + - 'third-party' + - 'LICENSE' + - 'NOTICE' + + comment: on-failure diff --git a/scripts/meta-transfer-tools.sh b/scripts/meta-transfer-tools.sh index 9925c8aa363..3bb316d47af 100755 --- a/scripts/meta-transfer-tools.sh +++ b/scripts/meta-transfer-tools.sh @@ -1,4 +1,9 @@ #! /bin/bash +# +# Copyright (c) 2020 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +# src= dst= diff --git a/scripts/utils.sh b/scripts/utils.sh index 55f7df6a629..1d115ac7b56 100644 --- a/scripts/utils.sh +++ b/scripts/utils.sh @@ -1,3 +1,8 @@ +# +# Copyright (c) 2020 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +# # Some color code definitions RED= GREEN= diff --git a/src/common/datatypes/EdgeOps-inl.h b/src/common/datatypes/EdgeOps-inl.h index c5ad128a330..f86ec5fbcf3 100644 --- a/src/common/datatypes/EdgeOps-inl.h +++ b/src/common/datatypes/EdgeOps-inl.h @@ -1,6 +1,9 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* + * + * Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. * - * obj source code is licensed under Apache 2.0 License. */ #ifndef COMMON_DATATYPES_EDGEOPS_H_ diff --git a/src/common/datatypes/HostAddrOps-inl.h b/src/common/datatypes/HostAddrOps-inl.h index cabe2193234..47f8554b7f2 100644 --- a/src/common/datatypes/HostAddrOps-inl.h +++ b/src/common/datatypes/HostAddrOps-inl.h @@ -1,6 +1,9 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* + * + * Copyright (c) 2020 vesoft inc. All rights reserved. 
+ * + * This source code is licensed under Apache 2.0 License. * - * obj source code is licensed under Apache 2.0 License. */ #ifndef COMMON_DATATYPES_HOSTADDROPS_H_ diff --git a/src/common/datatypes/PathOps-inl.h b/src/common/datatypes/PathOps-inl.h index ff3b55e1cb7..a22074bf995 100644 --- a/src/common/datatypes/PathOps-inl.h +++ b/src/common/datatypes/PathOps-inl.h @@ -1,6 +1,9 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* + * + * Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. * - * obj source code is licensed under Apache 2.0 License. */ #ifndef COMMON_DATATYPES_PATHOPS_H_ diff --git a/src/common/datatypes/ValueOps-inl.h b/src/common/datatypes/ValueOps-inl.h index 0e7dde8ecae..0fa5729be13 100644 --- a/src/common/datatypes/ValueOps-inl.h +++ b/src/common/datatypes/ValueOps-inl.h @@ -1,6 +1,9 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* + * + * Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. * - * obj source code is licensed under Apache 2.0 License. */ #ifndef COMMON_DATATYPES_VALUEOPS_H_ diff --git a/src/common/geo/GeoIndex.cpp b/src/common/geo/GeoIndex.cpp index 72e8f842136..1d7e3249ef7 100644 --- a/src/common/geo/GeoIndex.cpp +++ b/src/common/geo/GeoIndex.cpp @@ -1,6 +1,9 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* + * + * Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. * - * This sourc_e code is licensed under Apache 2.0 License. 
*/ #include "common/geo/GeoIndex.h" From 7e253961c0d31b2edd7085f30af427ee5330ef9c Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Fri, 5 Nov 2021 14:54:51 +0800 Subject: [PATCH 03/53] Remove duplicate license files (#3276) --- .dockerignore | 1 - LICENSES/Apache-2.0.txt | 202 ---------------------------------------- 2 files changed, 203 deletions(-) delete mode 100644 LICENSES/Apache-2.0.txt diff --git a/.dockerignore b/.dockerignore index 07bc73cd16c..7cc234ab2f3 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,4 @@ *.md -LICENSES/ docs/ *.swp diff --git a/LICENSES/Apache-2.0.txt b/LICENSES/Apache-2.0.txt deleted file mode 100644 index d6456956733..00000000000 --- a/LICENSES/Apache-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. From 58f0b440c8cef4eff7cfb1d52b1d4fc20cf29ec8 Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Fri, 5 Nov 2021 16:50:09 +0800 Subject: [PATCH 04/53] Limit workflow job concurrency (#3277) * Limit workflow job concurrency https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#concurrency * Limit workflow * Use workflow level concurrency --- .github/workflows/nightly.yml | 4 ++++ .github/workflows/pull_request.yml | 4 ++++ .github/workflows/release.yml | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 8ef61324fce..947a55d1a75 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -4,6 +4,10 @@ on: schedule: - cron: '0 18 * * *' +concurrency: + group: nightly + cancel-in-progress: true + defaults: run: shell: bash diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 412c022ec4b..55dfa57a1c4 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -7,6 +7,10 @@ on: - master - 'v[0-9]+.*' +concurrency: + group: ${{ github.head_ref }} + cancel-in-progress: true + defaults: run: shell: bash diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 823b5d92f2d..fd748970dea 100644 --- a/.github/workflows/release.yml +++ 
b/.github/workflows/release.yml @@ -5,6 +5,10 @@ on: types: - published +concurrency: + group: release + cancel-in-progress: true + defaults: run: shell: bash From 219d000a30ea1d0f4f41a4f835aca7a90d54df7a Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Mon, 8 Nov 2021 17:18:43 +0800 Subject: [PATCH 05/53] Refactor storage index (#3196) * stash * commit IndexEdge/VertexScanNode * commit IndexSelectionNode * commit Projection/Dedup Node * commit IndexLimitNode * stash * stash * stash * stash * add index int test * add double/string1 test * add string2/string3 test * finish index_scan_node unittest * add index node test * pass lookupindex test * pass ttl test * pass all unittest * remove debug log * fix bug * fix bug * fix bug * clear file * clear some useless code * add comments for IndexNode * add comments to IndexScanNode * ad comment to Selection/Projection/DedupNode * fix some init bug * fix bug to support geo * converge qualified strategy define together * address comments * address some comments; Modify IndexNode::next return Type * fix bug * add some comments * Add blank lines between function definitions * fix compile error * modify new file license * Modify test to avoid the bug mentioned in issue3191 * modify license error --- src/common/utils/IndexKeyUtils.h | 55 +- src/common/utils/test/CMakeLists.txt | 3 + src/daemons/CMakeLists.txt | 2 + src/graph/optimizer/OptimizerUtils.cpp | 13 +- src/interface/storage.thrift | 4 + src/kvstore/test/CMakeLists.txt | 1 + src/storage/CMakeLists.txt | 9 + src/storage/CommonUtils.h | 3 + src/storage/ExprVisitorBase.cpp | 85 + src/storage/ExprVisitorBase.h | 71 + src/storage/exec/IndexDedupNode.cpp | 93 + src/storage/exec/IndexDedupNode.h | 79 + src/storage/exec/IndexEdgeNode.h | 107 - src/storage/exec/IndexEdgeScanNode.cpp | 145 ++ src/storage/exec/IndexEdgeScanNode.h | 49 + src/storage/exec/IndexFilterNode.h | 170 -- src/storage/exec/IndexLimitNode.cpp | 51 + 
src/storage/exec/IndexLimitNode.h | 26 + src/storage/exec/IndexNode.cpp | 30 + src/storage/exec/IndexNode.h | 192 ++ src/storage/exec/IndexOutputNode.h | 356 ---- src/storage/exec/IndexProjectionNode.cpp | 63 + src/storage/exec/IndexProjectionNode.h | 46 + src/storage/exec/IndexScanNode.cpp | 547 ++++++ src/storage/exec/IndexScanNode.h | 566 ++++-- src/storage/exec/IndexSelectionNode.cpp | 78 + src/storage/exec/IndexSelectionNode.h | 136 ++ src/storage/exec/IndexVertexNode.h | 99 - src/storage/exec/IndexVertexScanNode.cpp | 117 ++ src/storage/exec/IndexVertexScanNode.h | 57 + src/storage/exec/StorageIterator.h | 2 +- src/storage/index/LookupBaseProcessor-inl.h | 472 ----- src/storage/index/LookupBaseProcessor.h | 92 - src/storage/index/LookupProcessor.cpp | 307 ++- src/storage/index/LookupProcessor.h | 52 +- src/storage/test/CMakeLists.txt | 14 + src/storage/test/IndexScanTest.cpp | 3 +- src/storage/test/IndexTest.cpp | 1937 +++++++++++++++++++ src/storage/test/IndexTestUtil.h | 611 ++++++ src/storage/test/LookupIndexTest.cpp | 4 + src/tools/db-dump/CMakeLists.txt | 1 + src/tools/meta-dump/CMakeLists.txt | 1 + tests/tck/features/lookup/LookUp.feature | 1 - 43 files changed, 5143 insertions(+), 1607 deletions(-) create mode 100644 src/storage/ExprVisitorBase.cpp create mode 100644 src/storage/ExprVisitorBase.h create mode 100644 src/storage/exec/IndexDedupNode.cpp create mode 100644 src/storage/exec/IndexDedupNode.h delete mode 100644 src/storage/exec/IndexEdgeNode.h create mode 100644 src/storage/exec/IndexEdgeScanNode.cpp create mode 100644 src/storage/exec/IndexEdgeScanNode.h delete mode 100644 src/storage/exec/IndexFilterNode.h create mode 100644 src/storage/exec/IndexLimitNode.cpp create mode 100644 src/storage/exec/IndexLimitNode.h create mode 100644 src/storage/exec/IndexNode.cpp create mode 100644 src/storage/exec/IndexNode.h delete mode 100644 src/storage/exec/IndexOutputNode.h create mode 100644 src/storage/exec/IndexProjectionNode.cpp create mode 100644 
src/storage/exec/IndexProjectionNode.h create mode 100644 src/storage/exec/IndexScanNode.cpp create mode 100644 src/storage/exec/IndexSelectionNode.cpp create mode 100644 src/storage/exec/IndexSelectionNode.h delete mode 100644 src/storage/exec/IndexVertexNode.h create mode 100644 src/storage/exec/IndexVertexScanNode.cpp create mode 100644 src/storage/exec/IndexVertexScanNode.h delete mode 100644 src/storage/index/LookupBaseProcessor-inl.h delete mode 100644 src/storage/index/LookupBaseProcessor.h create mode 100644 src/storage/test/IndexTest.cpp create mode 100644 src/storage/test/IndexTestUtil.h diff --git a/src/common/utils/IndexKeyUtils.h b/src/common/utils/IndexKeyUtils.h index 23288827422..5bb6c5c07a8 100644 --- a/src/common/utils/IndexKeyUtils.h +++ b/src/common/utils/IndexKeyUtils.h @@ -6,13 +6,14 @@ #ifndef COMMON_UTILS_INDEXKEYUTILS_H_ #define COMMON_UTILS_INDEXKEYUTILS_H_ +#include + #include "codec/RowReader.h" #include "common/base/Base.h" #include "common/base/StatusOr.h" #include "common/geo/GeoIndex.h" #include "common/utils/Types.h" #include "interface/gen-cpp2/meta_types.h" - namespace nebula { using PropertyType = nebula::cpp2::PropertyType; @@ -197,39 +198,39 @@ class IndexKeyUtils final { */ static std::string encodeDouble(double v) { - if (v < 0) { - /** - * TODO : now, the -(std::numeric_limits::min()) - * have a problem of precision overflow. current return value is -nan. 
- */ - auto* c1 = reinterpret_cast(&v); - auto i = *reinterpret_cast(c1); - i = -(std::numeric_limits::max() + i); - auto* c2 = reinterpret_cast(&i); - v = *reinterpret_cast(c2); + if (std::isnan(v)) { + return std::string(sizeof(double), '\xFF'); + } else if (v >= 0) { + auto val = folly::Endian::big(v); + auto* c = reinterpret_cast(&val); + c[0] |= 0x80; + std::string raw; + raw.reserve(sizeof(double)); + raw.append(c, sizeof(double)); + return raw; + } else { + int64_t* x = reinterpret_cast(&v); + *x = ~(*x); + auto val = folly::Endian::big(v); + auto* c = reinterpret_cast(&val); + std::string raw; + raw.reserve(sizeof(double)); + raw.append(c, sizeof(double)); + return raw; } - auto val = folly::Endian::big(v); - auto* c = reinterpret_cast(&val); - c[0] ^= 0x80; - std::string raw; - raw.reserve(sizeof(double)); - raw.append(c, sizeof(double)); - return raw; } static double decodeDouble(const folly::StringPiece& raw) { - char* v = const_cast(raw.data()); - v[0] ^= 0x80; - auto val = *reinterpret_cast(v); + int64_t val = *reinterpret_cast(raw.data()); val = folly::Endian::big(val); if (val < 0) { - auto* c1 = reinterpret_cast(&val); - auto i = *reinterpret_cast(c1); - i = -(std::numeric_limits::max() + i); - auto* c2 = reinterpret_cast(&i); - val = *reinterpret_cast(c2); + val &= 0x7fffffffffffffff; + } else { + val = ~val; } - return val; + double ret; + ::memcpy(&ret, &val, 8); + return ret; } static std::string encodeTime(const nebula::Time& t) { diff --git a/src/common/utils/test/CMakeLists.txt b/src/common/utils/test/CMakeLists.txt index 5e78743cf97..7cbc4c2b045 100644 --- a/src/common/utils/test/CMakeLists.txt +++ b/src/common/utils/test/CMakeLists.txt @@ -11,6 +11,7 @@ nebula_add_test( $ $ $ + $ $ LIBRARIES gtest @@ -30,6 +31,7 @@ nebula_add_test( $ $ $ + $ $ LIBRARIES gtest @@ -52,6 +54,7 @@ nebula_add_test( $ $ $ + $ $ LIBRARIES gtest diff --git a/src/daemons/CMakeLists.txt b/src/daemons/CMakeLists.txt index 67c57580366..ac98b67ff03 100644 --- 
a/src/daemons/CMakeLists.txt +++ b/src/daemons/CMakeLists.txt @@ -45,6 +45,7 @@ set(storage_meta_deps $ $ $ + $ $ ) @@ -128,6 +129,7 @@ nebula_add_executable( $ $ $ + $ $ ${common_deps} LIBRARIES diff --git a/src/graph/optimizer/OptimizerUtils.cpp b/src/graph/optimizer/OptimizerUtils.cpp index 3c0de166552..0537bd2fd1b 100644 --- a/src/graph/optimizer/OptimizerUtils.cpp +++ b/src/graph/optimizer/OptimizerUtils.cpp @@ -464,18 +464,7 @@ Value OptimizerUtils::normalizeValue(const meta::cpp2::ColumnDef& col, const Val if (!col.type.type_length_ref().has_value()) { return Value::kNullBadType; } - if (!v.isStr()) { - return v; - } - auto len = static_cast(*col.get_type().get_type_length()); - if (v.getStr().size() > len) { - return Value(v.getStr().substr(0, len)); - } else { - std::string s; - s.reserve(len); - s.append(v.getStr()).append(len - v.getStr().size(), '\0'); - return Value(std::move(s)); - } + return v; } case Value::Type::__EMPTY__: case Value::Type::NULLVALUE: diff --git a/src/interface/storage.thrift b/src/interface/storage.thrift index 9099ec610e2..be82a0ca2e3 100644 --- a/src/interface/storage.thrift +++ b/src/interface/storage.thrift @@ -506,6 +506,10 @@ struct IndexColumnHint { 2: ScanType scan_type, 3: common.Value begin_value, 4: common.Value end_value, + // When `columnhint` means ` >= begin_value`, `include_begin` is true + // and include_end is similar + 5: bool include_begin = true, + 6: bool include_end = false, } struct IndexQueryContext { diff --git a/src/kvstore/test/CMakeLists.txt b/src/kvstore/test/CMakeLists.txt index 75a807479cb..7c872b29eda 100644 --- a/src/kvstore/test/CMakeLists.txt +++ b/src/kvstore/test/CMakeLists.txt @@ -34,6 +34,7 @@ set(KVSTORE_TEST_LIBS $ $ $ + $ $ ) diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index a1b081f3d43..c182f8b4f99 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -27,6 +27,7 @@ nebula_add_library( nebula_add_library( graph_storage_service_handler OBJECT 
GraphStorageServiceHandler.cpp + ExprVisitorBase.cpp context/StorageExpressionContext.cpp mutate/AddVerticesProcessor.cpp mutate/DeleteVerticesProcessor.cpp @@ -40,6 +41,14 @@ nebula_add_library( query/ScanVertexProcessor.cpp query/ScanEdgeProcessor.cpp index/LookupProcessor.cpp + exec/IndexNode.cpp + exec/IndexDedupNode.cpp + exec/IndexEdgeScanNode.cpp + exec/IndexLimitNode.cpp + exec/IndexProjectionNode.cpp + exec/IndexScanNode.cpp + exec/IndexSelectionNode.cpp + exec/IndexVertexScanNode.cpp ) nebula_add_library( diff --git a/src/storage/CommonUtils.h b/src/storage/CommonUtils.h index 009680c6476..66479abb766 100644 --- a/src/storage/CommonUtils.h +++ b/src/storage/CommonUtils.h @@ -200,6 +200,9 @@ struct RuntimeContext { ObjectPool* objPool() { return &planContext_->objPool_; } bool isPlanKilled() { + if (env() == nullptr) { + return false; + } return env()->metaClient_ && env()->metaClient_->checkIsPlanKilled(planContext_->sessionId_, planContext_->planId_); } diff --git a/src/storage/ExprVisitorBase.cpp b/src/storage/ExprVisitorBase.cpp new file mode 100644 index 00000000000..9f56e8a977b --- /dev/null +++ b/src/storage/ExprVisitorBase.cpp @@ -0,0 +1,85 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#include "storage/ExprVisitorBase.h" +namespace nebula { +namespace storage { +void ExprVisitorBase::visit(ConstantExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(UnaryExpression *expr) { expr->operand()->accept(this); } +void ExprVisitorBase::visit(TypeCastingExpression *expr) { expr->operand()->accept(this); } +void ExprVisitorBase::visit(LabelExpression *expr) { fatal(expr); } +void ExprVisitorBase::visit(LabelAttributeExpression *expr) { fatal(expr); } +// binary expression +void ExprVisitorBase::visit(ArithmeticExpression *expr) { + expr->left()->accept(this); + expr->right()->accept(this); +} +void ExprVisitorBase::visit(RelationalExpression *expr) { + expr->left()->accept(this); + expr->right()->accept(this); +} +void ExprVisitorBase::visit(SubscriptExpression *expr) { + expr->left()->accept(this); + expr->right()->accept(this); +} +void ExprVisitorBase::visit(AttributeExpression *expr) { fatal(expr); } +void ExprVisitorBase::visit(LogicalExpression *expr) { + for (auto operand : expr->operands()) { + operand->accept(this); + } +} +// function call +void ExprVisitorBase::visit(FunctionCallExpression *expr) { + for (auto arg : expr->args()->args()) { + arg->accept(this); + } +} +void ExprVisitorBase::visit(AggregateExpression *expr) { fatal(expr); } +void ExprVisitorBase::visit(UUIDExpression *expr) { UNUSED(expr); } +// variable expression +void ExprVisitorBase::visit(VariableExpression *expr) { fatal(expr); } +void ExprVisitorBase::visit(VersionedVariableExpression *expr) { fatal(expr); } +// container expression +void ExprVisitorBase::visit(ListExpression *expr) { + for (auto item : expr->items()) { + item->accept(this); + } +} +void ExprVisitorBase::visit(SetExpression *expr) { + for (auto item : expr->items()) { + item->accept(this); + } +} +void ExprVisitorBase::visit(MapExpression *expr) { UNUSED(expr); } +// property Expression +void ExprVisitorBase::visit(TagPropertyExpression *expr) { UNUSED(expr); } +void 
ExprVisitorBase::visit(EdgePropertyExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(InputPropertyExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(VariablePropertyExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(DestPropertyExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(SourcePropertyExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(EdgeSrcIdExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(EdgeTypeExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(EdgeRankExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(EdgeDstIdExpression *expr) { UNUSED(expr); } +// vertex/edge expression +void ExprVisitorBase::visit(VertexExpression *expr) { UNUSED(expr); } +void ExprVisitorBase::visit(EdgeExpression *expr) { UNUSED(expr); } +// case expression +void ExprVisitorBase::visit(CaseExpression *expr) { UNUSED(expr); } +// path build expression +void ExprVisitorBase::visit(PathBuildExpression *expr) { fatal(expr); } +// column expression +void ExprVisitorBase::visit(ColumnExpression *expr) { fatal(expr); } +// predicate expression +void ExprVisitorBase::visit(PredicateExpression *expr) { fatal(expr); } +// list comprehension expression +void ExprVisitorBase::visit(ListComprehensionExpression *expr) { fatal(expr); } +// reduce expression +void ExprVisitorBase::visit(ReduceExpression *expr) { fatal(expr); } +// subscript range expression +void ExprVisitorBase::visit(SubscriptRangeExpression *expr) { fatal(expr); } + +} // namespace storage +} // namespace nebula diff --git a/src/storage/ExprVisitorBase.h b/src/storage/ExprVisitorBase.h new file mode 100644 index 00000000000..015f02d5aca --- /dev/null +++ b/src/storage/ExprVisitorBase.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#pragma once +#include "common/expression/ExprVisitor.h" +namespace nebula { +namespace storage { + +class ExprVisitorBase : public ::nebula::ExprVisitor { + public: + void visit(ConstantExpression *expr) override; + void visit(UnaryExpression *expr) override; + void visit(TypeCastingExpression *expr) override; + void visit(LabelExpression *expr) override; + void visit(LabelAttributeExpression *expr) override; + // binary expression + void visit(ArithmeticExpression *expr) override; + void visit(RelationalExpression *expr) override; + void visit(SubscriptExpression *expr) override; + void visit(AttributeExpression *expr) override; + void visit(LogicalExpression *expr) override; + // function call + void visit(FunctionCallExpression *expr) override; + void visit(AggregateExpression *expr) override; + void visit(UUIDExpression *expr) override; + // variable expression + void visit(VariableExpression *expr) override; + void visit(VersionedVariableExpression *expr) override; + // container expression + void visit(ListExpression *expr) override; + void visit(SetExpression *expr) override; + void visit(MapExpression *expr) override; + // property Expression + void visit(TagPropertyExpression *expr) override; + void visit(EdgePropertyExpression *expr) override; + void visit(InputPropertyExpression *expr) override; + void visit(VariablePropertyExpression *expr) override; + void visit(DestPropertyExpression *expr) override; + void visit(SourcePropertyExpression *expr) override; + void visit(EdgeSrcIdExpression *expr) override; + void visit(EdgeTypeExpression *expr) override; + void visit(EdgeRankExpression *expr) override; + void visit(EdgeDstIdExpression *expr) override; + // vertex/edge expression + void visit(VertexExpression *expr) override; + void visit(EdgeExpression *expr) override; + // case expression + void visit(CaseExpression *expr) override; + // path build expression + void visit(PathBuildExpression *expr) override; + // column expression + void 
visit(ColumnExpression *expr) override; + // predicate expression + void visit(PredicateExpression *expr) override; + // list comprehension expression + void visit(ListComprehensionExpression *expr) override; + // reduce expression + void visit(ReduceExpression *expr) override; + // subscript range expression + void visit(SubscriptRangeExpression *expr) override; + + private: + using ::nebula::ExprVisitor::visit; + inline void fatal(Expression *expr) { + LOG(FATAL) << "Unexpect expression kind " << static_cast(expr->kind()) << " at storage"; + } +}; +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexDedupNode.cpp b/src/storage/exec/IndexDedupNode.cpp new file mode 100644 index 00000000000..c27faf11eec --- /dev/null +++ b/src/storage/exec/IndexDedupNode.cpp @@ -0,0 +1,93 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#include "storage/exec/IndexDedupNode.h" +namespace nebula { +namespace storage { +IndexDedupNode::IndexDedupNode(const IndexDedupNode& node) + : IndexNode(node), dedupColumns_(node.dedupColumns_), dedupPos_(node.dedupPos_) {} + +IndexDedupNode::IndexDedupNode(RuntimeContext* context, const std::vector& dedupColumn) + : IndexNode(context, "IndexDedupNode"), dedupColumns_(dedupColumn) {} +::nebula::cpp2::ErrorCode IndexDedupNode::init(InitContext& ctx) { + for (auto& col : dedupColumns_) { + ctx.requiredColumns.insert(col); + } + // The contents of `ctx` should be the same when all child nodes are initialized, and `ctx` should + // be the same after initialization. 
+ for (size_t i = 0; i < children_.size() - 1; i++) { + auto tmp = ctx; // + auto ret = children_[i]->init(tmp); + if (ret != ::nebula::cpp2::ErrorCode::SUCCEEDED) { + return ret; + } + } + auto ret = children_.back()->init(ctx); + if (ret != ::nebula::cpp2::ErrorCode::SUCCEEDED) { + return ret; + } + for (auto& col : dedupColumns_) { + dedupPos_.push_back(ctx.retColMap[col]); + } + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} + +::nebula::cpp2::ErrorCode IndexDedupNode::doExecute(PartitionID partId) { + currentChild_ = 0; + dedupSet_.clear(); + return IndexNode::doExecute(partId); +} + +IndexNode::Result IndexDedupNode::iterateCurrentChild(size_t currentChild) { + auto& child = *children_[currentChild]; + Result result; + do { + result = child.next(); + // error or meet end + if (!result.hasData()) { + return result; + } + auto dedupResult = dedup(result.row()); + if (!dedupResult) { + continue; + } + return result; + } while (true); +} + +IndexNode::Result IndexDedupNode::doNext() { + Result result; + while (currentChild_ < children_.size()) { + result = iterateCurrentChild(currentChild_); + // error + if (!result.success()) { + return result; + } + // finish iterate one child + if (!result.hasData()) { + currentChild_++; + continue; + } + return result; + } + return Result(); +} + +IndexDedupNode::RowWrapper::RowWrapper(const Row& row, const std::vector& posList) { + values_.reserve(posList.size()); + for (auto p : posList) { + values_.emplace_back(row[p]); + } +} + +std::unique_ptr IndexDedupNode::copy() { + return std::make_unique(*this); +} + +std::string IndexDedupNode::identify() { + return fmt::format("{}(dedup=[{}])", name_, folly::join(',', dedupColumns_)); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexDedupNode.h b/src/storage/exec/IndexDedupNode.h new file mode 100644 index 00000000000..8025c08124b --- /dev/null +++ b/src/storage/exec/IndexDedupNode.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2021 vesoft inc. 
All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#pragma once +#include "common/datatypes/DataSet.h" +#include "folly/container/F14Set.h" +#include "storage/exec/IndexNode.h" +namespace nebula { +namespace storage { +/** + * + * IndexDedupNode + * + * reference: IndexNode + * + * `IndexDedupNode` is the class which is used to eliminate duplicate rows of data returned by + * multiple child nodes. + * ┌───────────┐ + * │ IndexNode │ + * └─────┬─────┘ + * │ + * ┌────────┴───────┐ + * │ IndexDedupNode │ + * └────────────────┘ + * Member: + * `dedupColumns_`: columns' name which are used to dedup + * `dedupPos_` : dedup columns' position in child return row + * `dedupSet_` : the set which record the rows have been return to parent + * `currentChild_`: current iterate child + */ + +class IndexDedupNode : public IndexNode { + public: + IndexDedupNode(const IndexDedupNode& node); + IndexDedupNode(RuntimeContext* context, const std::vector& dedupColumn); + ::nebula::cpp2::ErrorCode init(InitContext& ctx) override; + std::unique_ptr copy() override; + std::string identify() override; + + private: + inline bool dedup(const Row& row); + ::nebula::cpp2::ErrorCode doExecute(PartitionID partId) override; + Result doNext() override; + Result iterateCurrentChild(size_t currentChild); + // Define RowWrapper for dedup + class RowWrapper { + public: + RowWrapper(const Row& row, const std::vector& posList); + inline const List& values() const { return values_; } + + private: + List values_; + }; + // End of RowWrapper + struct Hasher { + size_t operator()(const RowWrapper& wrapper) const { + return std::hash()(wrapper.values()); + } + }; + struct Equal { + bool operator()(const RowWrapper& a, const RowWrapper& b) const { + return a.values() == b.values(); + } + }; + std::vector dedupColumns_; + std::vector dedupPos_; + folly::F14FastSet dedupSet_; + size_t currentChild_ = 0; +}; + +/* Definition of inline function */ +inline bool 
IndexDedupNode::dedup(const Row& row) { + auto result = dedupSet_.emplace(row, dedupPos_); + return result.second; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexEdgeNode.h b/src/storage/exec/IndexEdgeNode.h deleted file mode 100644 index 74dea76db87..00000000000 --- a/src/storage/exec/IndexEdgeNode.h +++ /dev/null @@ -1,107 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ -#ifndef STORAGE_EXEC_INDEXEDGENODE_H_ -#define STORAGE_EXEC_INDEXEDGENODE_H_ - -#include "common/base/Base.h" -#include "storage/exec/IndexScanNode.h" -#include "storage/exec/RelNode.h" - -namespace nebula { -namespace storage { - -template -class IndexEdgeNode final : public RelNode { - public: - using RelNode::doExecute; - - IndexEdgeNode(RuntimeContext* context, - IndexScanNode* indexScanNode, - const std::vector>& schemas, - const std::string& schemaName, - int64_t limit = -1) - : context_(context), - indexScanNode_(indexScanNode), - schemas_(schemas), - schemaName_(schemaName), - limit_(limit) { - RelNode::name_ = "IndexEdgeNode"; - } - - nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { - auto ret = RelNode::doExecute(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return ret; - } - - auto ttlProp = CommonUtils::ttlProps(context_->edgeSchema_); - - data_.clear(); - std::vector edges; - auto* iter = static_cast(indexScanNode_->iterator()); - while (iter && iter->valid()) { - if (context_->isPlanKilled()) { - return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; - } - if (!iter->val().empty() && ttlProp.first) { - auto v = IndexKeyUtils::parseIndexTTL(iter->val()); - if (CommonUtils::checkDataExpiredForTTL( - context_->edgeSchema_, std::move(v), ttlProp.second.second, ttlProp.second.first)) { - iter->next(); - continue; - } - } - storage::cpp2::EdgeKey edge; - edge.set_src(iter->srcId()); - edge.set_edge_type(context_->edgeType_); - 
edge.set_ranking(iter->ranking()); - edge.set_dst(iter->dstId()); - edges.emplace_back(std::move(edge)); - iter->next(); - } - int64_t count = 0; - for (const auto& edge : edges) { - auto key = NebulaKeyUtils::edgeKey(context_->vIdLen(), - partId, - (*edge.src_ref()).getStr(), - context_->edgeType_, - edge.get_ranking(), - (*edge.dst_ref()).getStr()); - std::string val; - ret = context_->env()->kvstore_->get(context_->spaceId(), partId, key, &val); - if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { - data_.emplace_back(std::move(key), std::move(val)); - } else if (ret == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { - continue; - } else { - return ret; - } - if (limit_ > 0 && ++count >= limit_) { - break; - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - std::vector moveData() { return std::move(data_); } - - const std::vector>& getSchemas() { - return schemas_; - } - - const std::string& getSchemaName() { return schemaName_; } - - private: - RuntimeContext* context_; - IndexScanNode* indexScanNode_; - const std::vector>& schemas_; - const std::string& schemaName_; - int64_t limit_; - std::vector data_; -}; - -} // namespace storage -} // namespace nebula -#endif // STORAGE_EXEC_INDEXEDGENODE_H_ diff --git a/src/storage/exec/IndexEdgeScanNode.cpp b/src/storage/exec/IndexEdgeScanNode.cpp new file mode 100644 index 00000000000..e9242e6e556 --- /dev/null +++ b/src/storage/exec/IndexEdgeScanNode.cpp @@ -0,0 +1,145 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "storage/exec/IndexEdgeScanNode.h" +namespace nebula { +namespace storage { +IndexEdgeScanNode::IndexEdgeScanNode(const IndexEdgeScanNode& node) + : IndexScanNode(node), edge_(node.edge_) {} +IndexEdgeScanNode::IndexEdgeScanNode(RuntimeContext* context, + IndexID indexId, + const std::vector& columnHint, + ::nebula::kvstore::KVStore* kvstore) + : IndexScanNode(context, "IndexEdgeScanNode", indexId, columnHint, kvstore) { + getIndex = std::function([this](std::shared_ptr& index) { + auto env = this->context_->env(); + auto indexMgr = env->indexMan_; + auto indexVal = indexMgr->getEdgeIndex(this->spaceId_, this->indexId_); + if (!indexVal.ok()) { + return ::nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND; + } + index = indexVal.value(); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }); + getEdge = std::function([this](EdgeSchemas& edge) { + auto env = this->context_->env(); + auto schemaMgr = env->schemaMan_; + auto allSchema = schemaMgr->getAllVerEdgeSchema(this->spaceId_); + auto edgeType = this->index_->get_schema_id().get_edge_type(); + if (!allSchema.ok() || !allSchema.value().count(edgeType)) { + return ::nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; + } + edge = allSchema.value().at(edgeType); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }); +} + +::nebula::cpp2::ErrorCode IndexEdgeScanNode::init(InitContext& ctx) { + if (auto ret = getIndex(this->index_); UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + if (auto ret = getEdge(edge_); UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + return IndexScanNode::init(ctx); +} + +Row IndexEdgeScanNode::decodeFromIndex(folly::StringPiece key) { + std::vector values(requiredColumns_.size()); + if (colPosMap_.count(kSrc)) { + auto vId = IndexKeyUtils::getIndexSrcId(context_->vIdLen(), key); + if (context_->isIntId()) { + values[colPosMap_[kSrc]] = Value(*reinterpret_cast(vId.data())); + } else { + values[colPosMap_[kSrc]] = Value(vId.subpiece(0, 
vId.find_first_of('\0')).toString()); + } + } + if (colPosMap_.count(kDst)) { + auto vId = IndexKeyUtils::getIndexDstId(context_->vIdLen(), key); + if (context_->isIntId()) { + values[colPosMap_[kDst]] = Value(*reinterpret_cast(vId.data())); + } else { + values[colPosMap_[kDst]] = Value(vId.subpiece(0, vId.find_first_of('\0')).toString()); + } + } + if (colPosMap_.count(kRank)) { + auto rank = IndexKeyUtils::getIndexRank(context_->vIdLen(), key); + values[colPosMap_[kRank]] = Value(rank); + } + if (colPosMap_.count(kType)) { + values[colPosMap_[kType]] = Value(context_->edgeType_); + } + // Truncate the src/rank/dst at the end to facilitate obtaining the two bytes representing the + // nullableBit directly at the end when needed + key.subtract(context_->vIdLen() * 2 + sizeof(EdgeRanking)); + decodePropFromIndex(key, colPosMap_, values); + return Row(std::move(values)); +} + +nebula::cpp2::ErrorCode IndexEdgeScanNode::getBaseData(folly::StringPiece key, + std::pair& kv) { + auto vIdLen = context_->vIdLen(); + kv.first = NebulaKeyUtils::edgeKey(vIdLen, + partId_, + IndexKeyUtils::getIndexSrcId(vIdLen, key).str(), + context_->edgeType_, + IndexKeyUtils::getIndexRank(vIdLen, key), + IndexKeyUtils::getIndexDstId(vIdLen, key).str()); + return kvstore_->get(context_->spaceId(), partId_, kv.first, &kv.second); +} + +Map IndexEdgeScanNode::decodeFromBase(const std::string& key, + const std::string& value) { + Map values; + auto reader = RowReaderWrapper::getRowReader(edge_, value); + for (auto& col : requiredAndHintColumns_) { + switch (QueryUtils::toReturnColType(col)) { + case QueryUtils::ReturnColType::kType: { + values[col] = Value(context_->edgeType_); + } break; + case QueryUtils::ReturnColType::kSrc: { + auto vId = NebulaKeyUtils::getSrcId(context_->vIdLen(), key); + if (context_->isIntId()) { + values[col] = Value(*reinterpret_cast(vId.data())); + } else { + values[col] = Value(vId.subpiece(0, vId.find_first_of('\0')).toString()); + } + } break; + case 
QueryUtils::ReturnColType::kDst: { + auto vId = NebulaKeyUtils::getDstId(context_->vIdLen(), key); + if (context_->isIntId()) { + values[col] = Value(*reinterpret_cast(vId.data())); + } else { + values[col] = Value(vId.subpiece(0, vId.find_first_of('\0')).toString()); + } + } break; + case QueryUtils::ReturnColType::kRank: { + values[col] = Value(NebulaKeyUtils::getRank(context_->vIdLen(), key)); + } break; + case QueryUtils::ReturnColType::kOther: { + auto retVal = QueryUtils::readValue(reader.get(), col, edge_.back()->field(col)); + if (!retVal.ok()) { + LOG(FATAL) << "Bad value for field" << col; + } + values[col] = std::move(retVal.value()); + } break; + default: + LOG(FATAL) << "Unexpect column name:" << col; + } + } + return values; +} + +const std::vector>& +IndexEdgeScanNode::getSchema() { + return edge_; +} + +std::unique_ptr IndexEdgeScanNode::copy() { + return std::make_unique(*this); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexEdgeScanNode.h b/src/storage/exec/IndexEdgeScanNode.h new file mode 100644 index 00000000000..d21ef95faa5 --- /dev/null +++ b/src/storage/exec/IndexEdgeScanNode.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#pragma once +#include "common/base/Base.h" +#include "common/utils/NebulaKeyUtils.h" +#include "storage/exec/IndexScanNode.h" +#include "storage/exec/QueryUtils.h" +#include "storage/exec/StorageIterator.h" +namespace nebula { +namespace storage { + +/** + * IndexEdgeScanNode + * + * reference: IndexScanNode + */ + +class IndexEdgeScanNode : public IndexScanNode { + public: + IndexEdgeScanNode(const IndexEdgeScanNode& node); + IndexEdgeScanNode(RuntimeContext* context, + IndexID indexId, + const std::vector& columnHint, + ::nebula::kvstore::KVStore* kvstore); + ::nebula::cpp2::ErrorCode init(InitContext& ctx) override; + std::unique_ptr copy() override; + + private: + Row decodeFromIndex(folly::StringPiece key) override; + nebula::cpp2::ErrorCode getBaseData(folly::StringPiece key, + std::pair& kv) override; + Map decodeFromBase(const std::string& key, const std::string& value) override; + + using EdgeSchemas = std::vector>; + using IndexItem = ::nebula::meta::cpp2::IndexItem; + const EdgeSchemas& getSchema() override; + EdgeSchemas edge_; + + // Convenient for testing + std::function<::nebula::cpp2::ErrorCode(std::shared_ptr&)> getIndex; + std::function<::nebula::cpp2::ErrorCode(EdgeSchemas&)> getEdge; + + FRIEND_TEST(IndexScanTest, Edge); + friend class IndexScanTestHelper; +}; +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexFilterNode.h b/src/storage/exec/IndexFilterNode.h deleted file mode 100644 index 5cb73ced098..00000000000 --- a/src/storage/exec/IndexFilterNode.h +++ /dev/null @@ -1,170 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ -#ifndef STORAGE_EXEC_INDEXFILTERNODE_H_ -#define STORAGE_EXEC_INDEXFILTERNODE_H_ - -#include "common/base/Base.h" -#include "common/context/ExpressionContext.h" -#include "common/expression/Expression.h" -#include "storage/exec/IndexEdgeNode.h" -#include "storage/exec/IndexScanNode.h" -#include "storage/exec/IndexVertexNode.h" -#include "storage/exec/RelNode.h" - -namespace nebula { -namespace storage { - -template -class IndexFilterNode final : public RelNode { - public: - using RelNode::doExecute; - - // evalExprByIndex_ is true, all fileds in filter is in index. No need to read - // data anymore. - IndexFilterNode(RuntimeContext* context, - IndexScanNode* indexScanNode, - StorageExpressionContext* exprCtx, - Expression* exp, - bool isEdge, - int64_t limit = -1) - : context_(context), - indexScanNode_(indexScanNode), - exprCtx_(exprCtx), - filterExp_(exp), - isEdge_(isEdge), - limit_(limit) { - evalExprByIndex_ = true; - RelNode::name_ = "IndexFilterNode"; - } - - // evalExprByIndex_ is false, some fileds in filter is out of index, which - // need to read data. - IndexFilterNode(RuntimeContext* context, - IndexEdgeNode* indexEdgeNode, - StorageExpressionContext* exprCtx, - Expression* exp, - int64_t limit = -1) - : context_(context), - indexEdgeNode_(indexEdgeNode), - exprCtx_(exprCtx), - filterExp_(exp), - limit_(limit) { - evalExprByIndex_ = false; - isEdge_ = true; - } - - // evalExprByIndex_ is false, some fileds in filter is out of index, which - // need to read data. 
- IndexFilterNode(RuntimeContext* context, - IndexVertexNode* indexVertexNode, - StorageExpressionContext* exprCtx, - Expression* exp, - int64_t limit = -1) - : context_(context), - indexVertexNode_(indexVertexNode), - exprCtx_(exprCtx), - filterExp_(exp), - limit_(limit) { - evalExprByIndex_ = false; - isEdge_ = false; - } - - nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { - data_.clear(); - auto ret = RelNode::doExecute(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return ret; - } - std::vector data; - if (evalExprByIndex_) { - data = indexScanNode_->moveData(); - } else if (isEdge_) { - data = indexEdgeNode_->moveData(); - } else { - data = indexVertexNode_->moveData(); - } - int64_t count = 0; - for (const auto& k : data) { - if (context_->isPlanKilled()) { - return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; - } - if (evalExprByIndex_) { - if (check(k.first)) { - data_.emplace_back(k.first, k.second); - count++; - } - } else { - const auto& schemas = - isEdge_ ? indexEdgeNode_->getSchemas() : indexVertexNode_->getSchemas(); - auto reader = RowReaderWrapper::getRowReader(schemas, k.second); - if (!reader) { - continue; - } - if (check(reader.get(), k.first)) { - data_.emplace_back(k.first, k.second); - count++; - } - } - if (limit_ > 0 && count >= limit_) { - break; - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - std::vector moveData() { return std::move(data_); } - - const std::vector>& getSchemas() { - return isEdge_ ? 
indexEdgeNode_->getSchemas() : indexVertexNode_->getSchemas(); - } - - bool hasNullableCol() const { return exprCtx_->hasNullableCol(); } - - const std::vector& indexCols() const { return exprCtx_->indexCols(); } - - private: - bool check(const std::string& raw) { - if (filterExp_ != nullptr) { - exprCtx_->reset(raw); - auto result = filterExp_->eval(*exprCtx_); - if (result.type() == Value::Type::BOOL) { - return result.getBool(); - } else { - return false; - } - } - return false; - } - - bool check(RowReader* reader, const std::string& raw) { - if (filterExp_ != nullptr) { - exprCtx_->reset(reader, raw); - auto result = filterExp_->eval(*exprCtx_); - if (result.type() == Value::Type::BOOL) { - return result.getBool(); - } else { - return false; - } - } - return false; - } - - private: - RuntimeContext* context_; - IndexScanNode* indexScanNode_{nullptr}; - IndexEdgeNode* indexEdgeNode_{nullptr}; - IndexVertexNode* indexVertexNode_{nullptr}; - StorageExpressionContext* exprCtx_; - Expression* filterExp_; - bool isEdge_; - bool evalExprByIndex_; - int64_t limit_; - std::vector data_{}; -}; - -} // namespace storage -} // namespace nebula - -#endif // STORAGE_EXEC_INDEXFILTERNODE_H_ diff --git a/src/storage/exec/IndexLimitNode.cpp b/src/storage/exec/IndexLimitNode.cpp new file mode 100644 index 00000000000..df5afed4130 --- /dev/null +++ b/src/storage/exec/IndexLimitNode.cpp @@ -0,0 +1,51 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#include "storage/exec/IndexLimitNode.h" +namespace nebula { +namespace storage { +IndexLimitNode::IndexLimitNode(const IndexLimitNode& node) + : IndexNode(node), offset_(node.offset_), limit_(node.limit_) {} + +IndexLimitNode::IndexLimitNode(RuntimeContext* context, uint64_t offset, uint64_t limit) + : IndexNode(context, "IndexLimitNode"), offset_(offset), limit_(limit) {} +IndexLimitNode::IndexLimitNode(RuntimeContext* context, uint64_t limit) + : IndexLimitNode(context, 0, limit) {} +nebula::cpp2::ErrorCode IndexLimitNode::doExecute(PartitionID partId) { + currentOffset_ = 0; + return children_[0]->execute(partId); +} + +IndexNode::Result IndexLimitNode::doNext() { + DCHECK_EQ(children_.size(), 1); + auto& child = *children_[0]; + while (UNLIKELY(currentOffset_ < offset_)) { + auto result = child.next(); + if (!result.hasData()) { + return result; + } + currentOffset_++; + } + if (currentOffset_ < offset_ + limit_) { + currentOffset_++; + return child.next(); + } else { + return Result(); + } +} + +std::unique_ptr IndexLimitNode::copy() { + return std::make_unique(*this); +} + +std::string IndexLimitNode::identify() { + if (offset_ > 0) { + return fmt::format("{}(offset={}, limit={})", name_, offset_, limit_); + } else { + return fmt::format("{}(limit={})", name_, limit_); + } +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexLimitNode.h b/src/storage/exec/IndexLimitNode.h new file mode 100644 index 00000000000..8811ec77a87 --- /dev/null +++ b/src/storage/exec/IndexLimitNode.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#pragma once +#include "folly/Likely.h" +#include "storage/exec/IndexNode.h" +namespace nebula { +namespace storage { +class IndexLimitNode : public IndexNode { + public: + IndexLimitNode(const IndexLimitNode& node); + IndexLimitNode(RuntimeContext* context, uint64_t offset, uint64_t limit); + IndexLimitNode(RuntimeContext* context, uint64_t limit); + std::unique_ptr copy() override; + std::string identify() override; + + private: + nebula::cpp2::ErrorCode doExecute(PartitionID partId) override; + Result doNext() override; + const uint64_t offset_, limit_; + uint64_t currentOffset_ = 0; +}; +} // namespace storage + +} // namespace nebula diff --git a/src/storage/exec/IndexNode.cpp b/src/storage/exec/IndexNode.cpp new file mode 100644 index 00000000000..0ce1ab3f401 --- /dev/null +++ b/src/storage/exec/IndexNode.cpp @@ -0,0 +1,30 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include "storage/exec/IndexNode.h" + +#include "folly/Likely.h" +namespace nebula { +namespace storage { +IndexNode::IndexNode(RuntimeContext* context, const std::string& name) + : context_(context), name_(name) { + spaceId_ = context_->spaceId(); +} + +IndexNode::IndexNode(const IndexNode& node) + : context_(node.context_), spaceId_(node.spaceId_), name_(node.name_) {} + +nebula::cpp2::ErrorCode IndexNode::doExecute(PartitionID partId) { + for (auto& child : children_) { + auto ret = child->execute(partId); + if (UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + } + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexNode.h b/src/storage/exec/IndexNode.h new file mode 100644 index 00000000000..0db70ec8030 --- /dev/null +++ b/src/storage/exec/IndexNode.h @@ -0,0 +1,192 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#pragma once +#include "common/base/ErrorOr.h" +#include "common/datatypes/DataSet.h" +#include "common/time/Duration.h" +#include "folly/AtomicLinkedList.h" +#include "folly/container/F14Map.h" +#include "interface/gen-cpp2/common_types.h" +#include "storage/CommonUtils.h" +namespace nebula { +namespace storage { +/** + * IndexNode + * + * Indexnode is the base class for each node that makes up the plan tree. + * + * Member: + * `children_` : all children of the node. + * `context_` : runtime context of plan. + * `name_` : node name which should be set in derive node. + * `duration_` : used to record execution time(exclude children node's time). + * `profileDetail_` : whether record execution time or not. + * + * Function: + * The functions is divided into three parts. + * + * First part is used to build node. This part include constructor/destructor, and + * `IndexNode(const IndexNode& node)` is used to cooperate with `copy` to realize + * the deep copy of node.`copy` function needs to be implemented by the derived + * class itself. + * In fact, the build process is divided into two stages. First, the user needs to + * make various derived classes and nd organize them into a plan tree(by + * `children_`).After that, the root node of plan tree needs to call the init + * function and recursively call the init function of all child nodes, `Initcontext` + * will pass parameters between nodes to determine the data format or other + * information to be returned between nodes during execution.Note that `init` needs + * to be executed before `copy`. + * + * Second part is used to access data. + * `execute` is used to initialize some variables at the beginning of each part(e.g + * dedup set, kv iterator, etc.) + * `next` is used to iterate data. Row format has been determined during `init`. + * Batch processing and loop unrolling can be used to optimize performance if + * necessary, but there are no serious performance problems at present. 
+ * `end` and `finish` are used to release resources at the end of execute or plan + * (e.g, external sort free disk,release schema lease if support Online DDL, commit + * write, etc.). + * However, there are no relevant requirements, so it will not be implemented for + * the time being. + * `xxx` is the interface function.It will recursive call child node's `xxx`. `doXxx` + * is the actual execution logic, and the derived class needs to override this + * function + * + * The third part is used to assist in obtaining some detailed information + */ + +using ErrorCode = ::nebula::cpp2::ErrorCode; +template +using Map = folly::F14FastMap; +template +using Set = folly::F14FastSet; +struct InitContext { + // Column required by parent node + Set requiredColumns; + // The format of the row returned to the parent node + std::vector returnColumns; + // The index of name in `returncolumns` + Map retColMap; +}; + +class IndexNode { + public: + /* Iterate result*/ + class Result { + public: + Result() : code_(ErrorCode::SUCCEEDED), empty_(true) {} + Result(const Result& result) : code_(result.code_), row_(result.row_), empty_(result.empty_) {} + Result(Result&& result) + : code_(result.code_), row_(std::move(result.row_)), empty_(result.empty_) {} + explicit Result(ErrorCode code) : code_(code), empty_(true) {} + explicit Result(Row&& row) : row_(row), empty_(false) {} + Result& operator=(Result&& result) { + this->code_ = result.code_; + this->row_ = std::move(result.row_); + this->empty_ = result.empty_; + return *this; + } + inline bool success() { return code_ == ErrorCode::SUCCEEDED; } + inline bool hasData() { return success() && empty_ == false; } + inline Row row() && { return std::move(row_); } + inline Row& row() & { return row_; } + ErrorCode code() { return code_; } + + private: + ErrorCode code_{ErrorCode::SUCCEEDED}; + Row row_; + bool empty_{true}; + }; + /* build */ + IndexNode(const IndexNode& node); + explicit IndexNode(RuntimeContext* context, const 
std::string& name); + virtual ~IndexNode() = default; + virtual std::unique_ptr copy() = 0; + void addChild(std::unique_ptr child) { children_.emplace_back(std::move(child)); } + const std::vector>& children() { return children_; } + virtual ::nebula::cpp2::ErrorCode init(InitContext& initCtx) { + DCHECK_EQ(children_.size(), 1); + return children_[0]->init(initCtx); + } + /* execution */ + inline nebula::cpp2::ErrorCode execute(PartitionID partId); + inline Result next(); + // inline nebula::cpp2::ErrorCode finish(); + + /* assist */ + const std::string& name() { return name_; } + void enableProfileDetail(); + virtual std::string identify() = 0; + inline const time::Duration& duration(); + + protected: + virtual Result doNext() = 0; + void beforeNext(); + void afterNext(); + virtual nebula::cpp2::ErrorCode doExecute(PartitionID partId); + void beforeExecute(); + void afterExecute(); + + RuntimeContext* context_; + GraphSpaceID spaceId_; + std::vector> children_; + std::string name_; + time::Duration duration_; + bool profileDetail_{false}; +}; + +/* Defination of inline function */ +inline IndexNode::Result IndexNode::next() { + beforeNext(); + if (context_->isPlanKilled()) { + return Result(::nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED); + } + Result ret = doNext(); + afterNext(); + return ret; +} + +inline void IndexNode::beforeNext() { + if (UNLIKELY(profileDetail_)) { + duration_.resume(); + } +} + +inline void IndexNode::afterNext() { + if (UNLIKELY(profileDetail_)) { + duration_.pause(); + } +} + +inline nebula::cpp2::ErrorCode IndexNode::execute(PartitionID partId) { + beforeExecute(); + auto ret = doExecute(partId); + afterExecute(); + return ret; +} + +inline void IndexNode::beforeExecute() { + if (UNLIKELY(profileDetail_)) { + duration_.resume(); + } +} + +inline void IndexNode::afterExecute() { + if (UNLIKELY(profileDetail_)) { + duration_.pause(); + } +} + +inline void IndexNode::enableProfileDetail() { + profileDetail_ = true; + for (auto& child : 
children_) { + child->enableProfileDetail(); + } +} + +inline const time::Duration& IndexNode::duration() { return duration_; } + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexOutputNode.h b/src/storage/exec/IndexOutputNode.h deleted file mode 100644 index d462c9dff42..00000000000 --- a/src/storage/exec/IndexOutputNode.h +++ /dev/null @@ -1,356 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ -#ifndef STORAGE_EXEC_INDEXOUTPUTNODE_H_ -#define STORAGE_EXEC_INDEXOUTPUTNODE_H_ - -#include "common/base/Base.h" -#include "storage/exec/IndexEdgeNode.h" -#include "storage/exec/IndexFilterNode.h" -#include "storage/exec/IndexScanNode.h" -#include "storage/exec/IndexVertexNode.h" -#include "storage/exec/RelNode.h" - -namespace nebula { -namespace storage { - -template -class IndexOutputNode final : public RelNode { - public: - using RelNode::doExecute; - - enum class IndexResultType : int8_t { - kEdgeFromIndexScan, - kEdgeFromIndexFilter, - kEdgeFromDataScan, - kEdgeFromDataFilter, - kVertexFromIndexScan, - kVertexFromIndexFilter, - kVertexFromDataScan, - kVertexFromDataFilter, - }; - - IndexOutputNode(nebula::DataSet* result, - RuntimeContext* context, - IndexScanNode* indexScanNode, - bool hasNullableCol, - const std::vector& fields) - : result_(result), - context_(context), - indexScanNode_(indexScanNode), - hasNullableCol_(hasNullableCol), - fields_(fields) { - type_ = context_->isEdge() ? 
IndexResultType::kEdgeFromIndexScan - : IndexResultType::kVertexFromIndexScan; - RelNode::name_ = "IndexOpuputNode"; - } - - IndexOutputNode(nebula::DataSet* result, RuntimeContext* context, IndexEdgeNode* indexEdgeNode) - : result_(result), context_(context), indexEdgeNode_(indexEdgeNode) { - type_ = IndexResultType::kEdgeFromDataScan; - RelNode::name_ = "IndexOpuputNode"; - } - - IndexOutputNode(nebula::DataSet* result, - RuntimeContext* context, - IndexVertexNode* indexVertexNode) - : result_(result), context_(context), indexVertexNode_(indexVertexNode) { - type_ = IndexResultType::kVertexFromDataScan; - RelNode::name_ = "IndexOpuputNode"; - } - - IndexOutputNode(nebula::DataSet* result, - RuntimeContext* context, - IndexFilterNode* indexFilterNode, - bool indexFilter = false) - : result_(result), context_(context), indexFilterNode_(indexFilterNode) { - hasNullableCol_ = indexFilterNode->hasNullableCol(); - fields_ = indexFilterNode_->indexCols(); - if (indexFilter) { - type_ = context_->isEdge() ? IndexResultType::kEdgeFromIndexFilter - : IndexResultType::kVertexFromIndexFilter; - } else { - type_ = context_->isEdge() ? 
IndexResultType::kEdgeFromDataFilter - : IndexResultType::kVertexFromDataFilter; - } - RelNode::name_ = "IndexOpuputNode"; - } - - nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { - auto ret = RelNode::doExecute(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return ret; - } - - switch (type_) { - case IndexResultType::kEdgeFromIndexScan: { - ret = collectResult(indexScanNode_->moveData()); - break; - } - case IndexResultType::kEdgeFromIndexFilter: { - ret = collectResult(indexFilterNode_->moveData()); - break; - } - case IndexResultType::kEdgeFromDataScan: { - ret = collectResult(indexEdgeNode_->moveData()); - break; - } - case IndexResultType::kEdgeFromDataFilter: { - ret = collectResult(indexFilterNode_->moveData()); - break; - } - case IndexResultType::kVertexFromIndexScan: { - ret = collectResult(indexScanNode_->moveData()); - break; - } - case IndexResultType::kVertexFromIndexFilter: { - ret = collectResult(indexFilterNode_->moveData()); - break; - } - case IndexResultType::kVertexFromDataScan: { - ret = collectResult(indexVertexNode_->moveData()); - break; - } - case IndexResultType::kVertexFromDataFilter: { - ret = collectResult(indexFilterNode_->moveData()); - break; - } - } - return ret; - } - - private: - nebula::cpp2::ErrorCode collectResult(const std::vector& data) { - if (context_->isPlanKilled()) { - return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; - } - auto ret = nebula::cpp2::ErrorCode::SUCCEEDED; - switch (type_) { - case IndexResultType::kEdgeFromIndexScan: - case IndexResultType::kEdgeFromIndexFilter: { - ret = edgeRowsFromIndex(data); - break; - } - case IndexResultType::kEdgeFromDataScan: - case IndexResultType::kEdgeFromDataFilter: { - ret = edgeRowsFromData(data); - break; - } - case IndexResultType::kVertexFromIndexScan: - case IndexResultType::kVertexFromIndexFilter: { - ret = vertexRowsFromIndex(data); - break; - } - case IndexResultType::kVertexFromDataScan: - case IndexResultType::kVertexFromDataFilter: { 
- ret = vertexRowsFromData(data); - break; - } - } - return ret; - } - - nebula::cpp2::ErrorCode vertexRowsFromData(const std::vector& data) { - const auto& schemas = type_ == IndexResultType::kVertexFromDataScan - ? indexVertexNode_->getSchemas() - : indexFilterNode_->getSchemas(); - if (schemas.empty()) { - return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; - } - for (const auto& val : data) { - Row row; - auto reader = RowReaderWrapper::getRowReader(schemas, val.second); - if (!reader) { - VLOG(1) << "Can't get tag reader"; - return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; - } - for (const auto& col : result_->colNames) { - auto ret = addIndexValue(row, reader.get(), val, col, schemas.back().get()); - if (!ret.ok()) { - return nebula::cpp2::ErrorCode::E_INVALID_DATA; - } - } - result_->rows.emplace_back(std::move(row)); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - nebula::cpp2::ErrorCode vertexRowsFromIndex(const std::vector& data) { - for (const auto& val : data) { - Row row; - for (const auto& col : result_->colNames) { - auto ret = addIndexValue(row, val, col); - if (!ret.ok()) { - return nebula::cpp2::ErrorCode::E_INVALID_DATA; - } - } - result_->rows.emplace_back(std::move(row)); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - nebula::cpp2::ErrorCode edgeRowsFromData(const std::vector& data) { - const auto& schemas = type_ == IndexResultType::kEdgeFromDataScan - ? 
indexEdgeNode_->getSchemas() - : indexFilterNode_->getSchemas(); - if (schemas.empty()) { - return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; - } - for (const auto& val : data) { - Row row; - auto reader = RowReaderWrapper::getRowReader(schemas, val.second); - if (!reader) { - VLOG(1) << "Can't get tag reader"; - return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; - } - for (const auto& col : result_->colNames) { - auto ret = addIndexValue(row, reader.get(), val, col, schemas.back().get()); - if (!ret.ok()) { - return nebula::cpp2::ErrorCode::E_INVALID_DATA; - } - } - result_->rows.emplace_back(std::move(row)); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - nebula::cpp2::ErrorCode edgeRowsFromIndex(const std::vector& data) { - for (const auto& val : data) { - Row row; - for (const auto& col : result_->colNames) { - auto ret = addIndexValue(row, val, col); - if (!ret.ok()) { - return nebula::cpp2::ErrorCode::E_INVALID_DATA; - } - } - result_->rows.emplace_back(std::move(row)); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - // Add the value by data val - Status addIndexValue(Row& row, - RowReader* reader, - const kvstore::KV& data, - const std::string& col, - const meta::NebulaSchemaProvider* schema) { - switch (QueryUtils::toReturnColType(col)) { - case QueryUtils::ReturnColType::kVid: { - auto vId = NebulaKeyUtils::getVertexId(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(vId.data())); - } else { - row.emplace_back(vId.subpiece(0, vId.find_first_of('\0')).toString()); - } - break; - } - case QueryUtils::ReturnColType::kTag: { - row.emplace_back(NebulaKeyUtils::getTagId(context_->vIdLen(), data.first)); - break; - } - case QueryUtils::ReturnColType::kSrc: { - auto src = NebulaKeyUtils::getSrcId(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(src.data())); - } else { - row.emplace_back(src.subpiece(0, src.find_first_of('\0')).toString()); - } - 
break; - } - case QueryUtils::ReturnColType::kType: { - row.emplace_back(NebulaKeyUtils::getEdgeType(context_->vIdLen(), data.first)); - break; - } - case QueryUtils::ReturnColType::kRank: { - row.emplace_back(NebulaKeyUtils::getRank(context_->vIdLen(), data.first)); - break; - } - case QueryUtils::ReturnColType::kDst: { - auto dst = NebulaKeyUtils::getDstId(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(dst.data())); - } else { - row.emplace_back(dst.subpiece(0, dst.find_first_of('\0')).toString()); - } - break; - } - default: { - auto retVal = QueryUtils::readValue(reader, col, schema); - if (!retVal.ok()) { - VLOG(3) << "Bad value for field : " << col; - return retVal.status(); - } - row.emplace_back(std::move(retVal.value())); - } - } - return Status::OK(); - } - - // Add the value by index key - Status addIndexValue(Row& row, const kvstore::KV& data, const std::string& col) { - switch (QueryUtils::toReturnColType(col)) { - case QueryUtils::ReturnColType::kVid: { - auto vId = IndexKeyUtils::getIndexVertexID(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(vId.data())); - } else { - row.emplace_back(vId.subpiece(0, vId.find_first_of('\0')).toString()); - } - break; - } - case QueryUtils::ReturnColType::kTag: { - row.emplace_back(context_->tagId_); - break; - } - case QueryUtils::ReturnColType::kSrc: { - auto src = IndexKeyUtils::getIndexSrcId(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(src.data())); - } else { - row.emplace_back(src.subpiece(0, src.find_first_of('\0')).toString()); - } - break; - } - case QueryUtils::ReturnColType::kType: { - row.emplace_back(context_->edgeType_); - break; - } - case QueryUtils::ReturnColType::kRank: { - row.emplace_back(IndexKeyUtils::getIndexRank(context_->vIdLen(), data.first)); - break; - } - case QueryUtils::ReturnColType::kDst: { - auto dst = 
IndexKeyUtils::getIndexDstId(context_->vIdLen(), data.first); - if (context_->isIntId()) { - row.emplace_back(*reinterpret_cast(dst.data())); - } else { - row.emplace_back(dst.subpiece(0, dst.find_first_of('\0')).toString()); - } - break; - } - default: { - auto v = IndexKeyUtils::getValueFromIndexKey( - context_->vIdLen(), data.first, col, fields_, context_->isEdge(), hasNullableCol_); - row.emplace_back(std::move(v)); - } - } - return Status::OK(); - } - - private: - nebula::DataSet* result_; - RuntimeContext* context_; - IndexResultType type_; - IndexScanNode* indexScanNode_{nullptr}; - IndexEdgeNode* indexEdgeNode_{nullptr}; - IndexVertexNode* indexVertexNode_{nullptr}; - IndexFilterNode* indexFilterNode_{nullptr}; - bool hasNullableCol_{}; - std::vector fields_; -}; - -} // namespace storage -} // namespace nebula - -#endif // STORAGE_EXEC_INDEXOUTPUTNODE_H_ diff --git a/src/storage/exec/IndexProjectionNode.cpp b/src/storage/exec/IndexProjectionNode.cpp new file mode 100644 index 00000000000..946edafb418 --- /dev/null +++ b/src/storage/exec/IndexProjectionNode.cpp @@ -0,0 +1,63 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#include "storage/exec/IndexProjectionNode.h" +namespace nebula { +namespace storage { +IndexProjectionNode::IndexProjectionNode(const IndexProjectionNode& node) + : IndexNode(node), requiredColumns_(node.requiredColumns_), colPos_(node.colPos_) {} +IndexProjectionNode::IndexProjectionNode(RuntimeContext* context, + const std::vector& requiredColumns) + : IndexNode(context, "IndexProjectionNode"), requiredColumns_(requiredColumns) {} +nebula::cpp2::ErrorCode IndexProjectionNode::init(InitContext& ctx) { + DCHECK_EQ(children_.size(), 1); + for (auto& col : requiredColumns_) { + ctx.requiredColumns.insert(col); + } + auto ret = children_[0]->init(ctx); + if (UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + for (auto& col : requiredColumns_) { + auto iter = ctx.retColMap.find(col); + DCHECK(iter != ctx.retColMap.end()); + colPos_[col] = iter->second; + } + ctx.returnColumns = requiredColumns_; + ctx.retColMap.clear(); + for (size_t i = 0; i < ctx.returnColumns.size(); i++) { + ctx.retColMap[ctx.returnColumns[i]] = i; + } + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} + +IndexNode::Result IndexProjectionNode::doNext() { + DCHECK_EQ(children_.size(), 1); + auto& child = *children_[0]; + Result result = child.next(); + if (result.hasData()) { + result = Result(project(std::move(result).row())); + } + return result; +} + +Row IndexProjectionNode::project(Row&& row) { + Row ret; + ret.reserve(requiredColumns_.size()); + for (auto& col : requiredColumns_) { + ret.emplace_back(std::move(row[colPos_[col]])); + } + return ret; +} + +std::unique_ptr IndexProjectionNode::copy() { + return std::make_unique(*this); +} + +std::string IndexProjectionNode::identify() { + return fmt::format("{}(projectColumn=[{}])", name_, folly::join(",", requiredColumns_)); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexProjectionNode.h b/src/storage/exec/IndexProjectionNode.h new file mode 100644 index 
00000000000..736971620e9 --- /dev/null +++ b/src/storage/exec/IndexProjectionNode.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#pragma once + +#include "folly/Likely.h" +#include "storage/exec/IndexNode.h" +namespace nebula { +namespace storage { +/** + * + * IndexProjectionNode + * + * reference: IndexNode + * + * `IndexProjectionNode` is the class which is used to reformat the row to ensure that the format of + * the returned row meets the requirements of RPC request. + * ┌───────────┐ + * │ IndexNode │ + * └─────┬─────┘ + * │ + * ┌──────────┴──────────┐ + * │ IndexProjectionNode │ + * └─────────────────────┘ + * + * Member: + * `requiredColumns_` : Row format required by parent node + * `colPos_` : each column position in child node return row + */ +class IndexProjectionNode : public IndexNode { + public: + IndexProjectionNode(const IndexProjectionNode& node); + IndexProjectionNode(RuntimeContext* context, const std::vector& requiredColumns); + nebula::cpp2::ErrorCode init(InitContext& ctx) override; + std::unique_ptr copy() override; + std::string identify() override; + + private: + Result doNext() override; + Row project(Row&& row); + std::vector requiredColumns_; + Map colPos_; +}; +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexScanNode.cpp b/src/storage/exec/IndexScanNode.cpp new file mode 100644 index 00000000000..33f344844ac --- /dev/null +++ b/src/storage/exec/IndexScanNode.cpp @@ -0,0 +1,547 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#include "storage/exec/IndexScanNode.h" + +namespace nebula { +namespace storage { +// Define of Path +Path::Path(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen) + : index_(index), schema_(schema), hints_(hints) { + bool nullFlag = false; + for (auto field : index->get_fields()) { + bool tmp = field.nullable_ref().value_or(false); + nullable_.push_back(tmp); + nullFlag |= tmp; + // TODO: improve performance of compute nullable offset in index_key + auto type = IndexKeyUtils::toValueType(field.get_type().get_type()); + auto tmpStr = IndexKeyUtils::encodeNullValue(type, field.get_type().get_type_length()); + index_nullable_offset_ += tmpStr.size(); + totalKeyLength_ += tmpStr.size(); + } + if (!nullFlag) { + nullable_.clear(); + } else { + totalKeyLength_ += 2; + } + if (index_->get_schema_id().tag_id_ref().has_value()) { + totalKeyLength_ += vidLen; + suffixLength_ = vidLen; + } else { + totalKeyLength_ += vidLen * 2 + sizeof(EdgeRanking); + suffixLength_ = vidLen * 2 + sizeof(EdgeRanking); + } +} + +std::unique_ptr Path::make(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen) { + std::unique_ptr ret; + if (hints.empty() || hints.back().get_scan_type() == cpp2::ScanType::PREFIX) { + ret.reset(new PrefixPath(index, schema, hints, vidLen)); + + } else { + ret.reset(new RangePath(index, schema, hints, vidLen)); + } + return ret; +} + +QualifiedStrategy::Result Path::qualified(const folly::StringPiece& key) { + return strategySet_(key); +} + +std::string Path::encodeValue(const Value& value, + const ColumnTypeDef& colDef, + size_t index, + std::string& key) { + std::string val; + bool isNull = false; + if (colDef.get_type() == ::nebula::cpp2::PropertyType::GEOGRAPHY) { + CHECK_EQ(value.type(), Value::Type::STRING); + val = value.getStr(); + } else if (value.type() == Value::Type::STRING) { + val = 
IndexKeyUtils::encodeValue(value, *colDef.get_type_length()); + if (val.back() != '\0') { + strategySet_.insert(QualifiedStrategy::constant()); + } + } else if (value.type() == Value::Type::NULLVALUE) { + auto vtype = IndexKeyUtils::toValueType(colDef.get_type()); + val = IndexKeyUtils::encodeNullValue(vtype, colDef.get_type_length()); + isNull = true; + } else { + val = IndexKeyUtils::encodeValue(value); + } + // If the current colDef can be null, then it is necessary to additionally determine whether the + // corresponding value under a nullable is null when parsing the key (the encoding of the maximum + // value, for example, the encoding of INT_MAX and null are the same, both are 8*' \xFF') + if (!nullable_.empty() && nullable_[index] == true) { + if (isNull) { + strategySet_.insert(QualifiedStrategy::checkNull(index, index_nullable_offset_)); + } else { + strategySet_.insert(QualifiedStrategy::checkNull(index, index_nullable_offset_)); + } + } else if (isNull) { + strategySet_.insert(QualifiedStrategy::constant()); + } + key.append(val); + return val; +} + +const std::string& Path::toString() { return serializeString_; } + +// End of Path + +// Define of RangePath +RangePath::RangePath(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen) + : Path(index, schema, hints, vidLen) { + buildKey(); +} + +void RangePath::resetPart(PartitionID partId) { + std::string p = IndexKeyUtils::indexPrefix(partId); + startKey_ = startKey_.replace(0, p.size(), p); + endKey_ = endKey_.replace(0, p.size(), p); +} + +QualifiedStrategy::Result RangePath::qualified(const Map& rowData) { + for (size_t i = 0; i < hints_.size() - 1; i++) { + auto& hint = hints_[i]; + if (hint.get_begin_value() != rowData.at(hint.get_column_name())) { + return QualifiedStrategy::INCOMPATIBLE; + } + } + auto& hint = hints_.back(); + // TODO(hs.zhang): improve performance.Check include or not during build key. 
+ if (hint.begin_value_ref().is_set()) { + bool ret = includeStart_ ? hint.get_begin_value() <= rowData.at(hint.get_column_name()) + : hint.get_begin_value() < rowData.at(hint.get_column_name()); + if (!ret) { + return QualifiedStrategy::INCOMPATIBLE; + } + } + if (hint.end_value_ref().is_set()) { + bool ret = includeEnd_ ? hint.get_end_value() >= rowData.at(hint.get_column_name()) + : hint.get_end_value() > rowData.at(hint.get_column_name()); + if (!ret) { + return QualifiedStrategy::INCOMPATIBLE; + } + } + return QualifiedStrategy::COMPATIBLE; +} + +void RangePath::buildKey() { + std::string commonIndexPrefix; + commonIndexPrefix.append(IndexKeyUtils::indexPrefix(0, index_->index_id_ref().value())); + auto fieldIter = index_->get_fields().begin(); + for (size_t i = 0; i < hints_.size() - 1; i++, fieldIter++) { + auto& hint = hints_[i]; + CHECK(fieldIter->get_name() == hint.get_column_name()); + auto type = IndexKeyUtils::toValueType(fieldIter->get_type().get_type()); + CHECK(type != Value::Type::STRING || fieldIter->get_type().type_length_ref().has_value()); + encodeValue(hint.get_begin_value(), fieldIter->get_type(), i, commonIndexPrefix); + serializeString_ += + fmt::format("{}={}, ", hint.get_column_name(), hint.get_begin_value().toString()); + } + auto& hint = hints_.back(); + size_t index = hints_.size() - 1; + // The first n-1 columnHint has been spelled out the common prefix, and then according to the nth + // columnHint to determine the RangePath Scan range [a, b). Note that [a, b) must be the range of + // include begin but exclude end. + // [startKey, endKey) = common prefix + [a, b) + auto [a, b] = encodeRange(hint, fieldIter->get_type(), index, commonIndexPrefix.size()); + // left will be `[a`,`(a`, or `[INF` + std::string left = + hint.begin_value_ref().is_set() + ? fmt::format( + "{}{}", hint.get_include_begin() ? 
'[' : '(', hint.get_begin_value().toString()) + : "[-INF"; + // right will be `b]`, `b)`, or `INF]` + std::string right = + hint.end_value_ref().is_set() + ? fmt::format("{}{}", hint.get_end_value().toString(), hint.get_include_end() ? ']' : ')') + : "INF]"; + serializeString_ += fmt::format("{}={},{}", hint.get_column_name(), left, right); + startKey_ = commonIndexPrefix + a; + endKey_ = commonIndexPrefix + b; + // If `end_value` is not set, `b` will be empty. So `endKey_` should append '\xFF' until + // endKey_.size() > `totalKeyLength_` to indicate positive infinity prefixed with + // `commonIndexPrefix` + if (!hint.end_value_ref().is_set()) { + endKey_.append(totalKeyLength_ - endKey_.size() + 1, '\xFF'); + } +} + +std::tuple RangePath::encodeRange( + const cpp2::IndexColumnHint& hint, + const nebula::meta::cpp2::ColumnTypeDef& colTypeDef, + size_t colIndex, + size_t offset) { + std::string startKey, endKey; + bool needCheckNullable = !nullable_.empty() && nullable_[colIndex]; + if (hint.end_value_ref().is_set()) { + includeEnd_ = hint.get_include_end(); + auto tmp = encodeEndValue(hint.get_end_value(), colTypeDef, endKey, offset); + if (memcmp(tmp.data(), std::string(tmp.size(), '\xFF').data(), tmp.size()) != 0) { + needCheckNullable &= false; + } + } + if (hint.begin_value_ref().is_set()) { + includeStart_ = hint.get_include_begin(); + encodeBeginValue(hint.get_begin_value(), colTypeDef, startKey, offset); + } + if (UNLIKELY(needCheckNullable)) { + strategySet_.insert(QualifiedStrategy::checkNull(colIndex, index_nullable_offset_)); + } + if (UNLIKELY(colTypeDef.get_type() == nebula::cpp2::PropertyType::GEOGRAPHY)) { + strategySet_.insert(QualifiedStrategy::dedupGeoIndex(suffixLength_)); + } + return {startKey, endKey}; +} + +std::string RangePath::encodeBeginValue(const Value& value, + const ColumnTypeDef& colDef, + std::string& key, + size_t offset) { + std::string val; + bool greater = !includeStart_; + CHECK_NE(value.type(), Value::Type::NULLVALUE); + if 
(colDef.get_type() == ::nebula::cpp2::PropertyType::GEOGRAPHY) { + val = value.getStr(); + } else if (value.type() == Value::Type::STRING) { + bool truncated = false; + val = encodeString(value, *colDef.get_type_length(), truncated); + greater &= !truncated; + if (UNLIKELY(truncated)) { + strategySet_.insert(QualifiedStrategy::compareTruncated(val, offset)); + } + } else if (value.type() == Value::Type::FLOAT) { + bool isNaN = false; + val = encodeFloat(value, isNaN); + greater |= isNaN; + // TODO(hs.zhang): Optimize the logic of judging NaN + strategySet_.insert(QualifiedStrategy::checkNaN(offset)); + } else { + val = IndexKeyUtils::encodeValue(value); + } + if (greater) { + val.append(suffixLength_ + 1, '\xFF'); + } + key += val; + return val; +} + +std::string RangePath::encodeEndValue(const Value& value, + const ColumnTypeDef& colDef, + std::string& key, + size_t offset) { + CHECK_NE(value.type(), Value::Type::NULLVALUE); + std::string val; + bool greater = includeEnd_; + if (colDef.get_type() == ::nebula::cpp2::PropertyType::GEOGRAPHY) { + val = value.getStr(); + } else if (value.type() == Value::Type::STRING) { + bool truncated = false; + val = encodeString(value, *colDef.get_type_length(), truncated); + greater |= truncated; + if (UNLIKELY(truncated)) { + strategySet_.insert(QualifiedStrategy::compareTruncated(val, offset)); + } + } else if (value.type() == Value::Type::FLOAT) { + bool isNaN = false; + val = encodeFloat(value, isNaN); + greater |= isNaN; + if (UNLIKELY(isNaN)) { + strategySet_.insert(QualifiedStrategy::checkNaN(offset)); + } + } else { + val = IndexKeyUtils::encodeValue(value); + } + if (greater) { + val.append(suffixLength_ + 1, '\xFF'); + } + key += val; + return val; +} + +inline std::string RangePath::encodeString(const Value& value, size_t len, bool& truncated) { + std::string val = IndexKeyUtils::encodeValue(value); + if (val.size() < len) { + val.append(len - val.size(), '\x00'); + } else { + val = val.substr(0, len); + truncated = 
true; + } + return val; +} + +std::string RangePath::encodeFloat(const Value& value, bool& isNaN) { + std::string val = IndexKeyUtils::encodeValue(value); + // check NaN + if (UNLIKELY(memcmp(val.data(), "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", val.size()) == 0)) { + isNaN = true; + } + return val; +} + +// End of RangePath + +// Define of PrefixPath +PrefixPath::PrefixPath(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen) + : Path(index, schema, hints, vidLen) { + buildKey(); +} + +QualifiedStrategy::Result PrefixPath::qualified(const Map& rowData) { + for (auto& hint : hints_) { + if (hint.get_begin_value() != rowData.at(hint.get_column_name())) { + return QualifiedStrategy::INCOMPATIBLE; + } + } + return QualifiedStrategy::COMPATIBLE; +} + +void PrefixPath::resetPart(PartitionID partId) { + std::string p = IndexKeyUtils::indexPrefix(partId); + prefix_ = prefix_.replace(0, p.size(), p); +} + +void PrefixPath::buildKey() { + std::string common; + common.append(IndexKeyUtils::indexPrefix(0, index_->index_id_ref().value())); + auto fieldIter = index_->get_fields().begin(); + for (size_t i = 0; i < hints_.size(); i++, fieldIter++) { + auto& hint = hints_[i]; + CHECK(fieldIter->get_name() == hint.get_column_name()); + auto type = IndexKeyUtils::toValueType(fieldIter->get_type().get_type()); + CHECK(type != Value::Type::STRING || fieldIter->get_type().type_length_ref().has_value()); + encodeValue(hint.get_begin_value(), fieldIter->get_type(), i, common); + serializeString_ += + fmt::format("{}={}, ", hint.get_column_name(), hint.get_begin_value().toString()); + } + for (; fieldIter != index_->get_fields().end(); fieldIter++) { + if (UNLIKELY(fieldIter->get_type().get_type() == nebula::cpp2::PropertyType::GEOGRAPHY)) { + strategySet_.insert(QualifiedStrategy::dedupGeoIndex(suffixLength_)); + break; + } + } + prefix_ = std::move(common); +} + +// End of PrefixPath +// Define of IndexScan + 
+IndexScanNode::IndexScanNode(const IndexScanNode& node) + : IndexNode(node), + partId_(node.partId_), + indexId_(node.indexId_), + index_(node.index_), + indexNullable_(node.indexNullable_), + columnHints_(node.columnHints_), + kvstore_(node.kvstore_), + requiredColumns_(node.requiredColumns_), + requiredAndHintColumns_(node.requiredAndHintColumns_), + ttlProps_(node.ttlProps_), + needAccessBase_(node.needAccessBase_), + colPosMap_(node.colPosMap_) { + if (node.path_->isRange()) { + path_ = std::make_unique(*dynamic_cast(node.path_.get())); + } else { + path_ = std::make_unique(*dynamic_cast(node.path_.get())); + } +} + +::nebula::cpp2::ErrorCode IndexScanNode::init(InitContext& ctx) { + DCHECK(requiredColumns_.empty()); + ttlProps_ = CommonUtils::ttlProps(getSchema().back().get()); + requiredAndHintColumns_ = ctx.requiredColumns; + auto schema = getSchema().back(); + for (auto& hint : columnHints_) { + requiredAndHintColumns_.insert(hint.get_column_name()); + } + for (auto& col : ctx.requiredColumns) { + requiredColumns_.push_back(col); + } + ctx.returnColumns = requiredColumns_; + for (size_t i = 0; i < ctx.returnColumns.size(); i++) { + ctx.retColMap[ctx.returnColumns[i]] = i; + } + colPosMap_ = ctx.retColMap; + // Analyze whether the scan needs to access base data. 
+ // TODO(hs.zhang): The performance is better to judge based on whether the string is truncated + auto tmp = ctx.requiredColumns; + for (auto& field : index_->get_fields()) { + if (field.get_type().get_type() == ::nebula::cpp2::PropertyType::FIXED_STRING) { + continue; + } + if (field.get_type().get_type() == ::nebula::cpp2::PropertyType::GEOGRAPHY) { + continue; + } + tmp.erase(field.get_name()); + } + tmp.erase(kVid); + tmp.erase(kTag); + tmp.erase(kRank); + tmp.erase(kSrc); + tmp.erase(kDst); + tmp.erase(kType); + needAccessBase_ = !tmp.empty(); + path_ = Path::make(index_.get(), getSchema().back().get(), columnHints_, context_->vIdLen()); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode IndexScanNode::doExecute(PartitionID partId) { + partId_ = partId; + auto ret = resetIter(partId); + return ret; +} + +IndexNode::Result IndexScanNode::doNext() { + for (; iter_ && iter_->valid(); iter_->next()) { + if (!checkTTL()) { + continue; + } + auto q = path_->qualified(iter_->key()); + if (q == QualifiedStrategy::INCOMPATIBLE) { + continue; + } + bool compatible = q == QualifiedStrategy::COMPATIBLE; + if (compatible && !needAccessBase_) { + auto key = iter_->key().toString(); + iter_->next(); + Row row = decodeFromIndex(key); + return Result(std::move(row)); + } + std::pair kv; + auto ret = getBaseData(iter_->key(), kv); + if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { // do nothing + } else if (ret == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { + if (LIKELY(!fatalOnBaseNotFound_)) { + LOG(WARNING) << "base data not found"; + } else { + LOG(FATAL) << "base data not found"; + } + continue; + } else { + return Result(ret); + } + Map rowData = decodeFromBase(kv.first, kv.second); + if (!compatible) { + q = path_->qualified(rowData); + CHECK(q != QualifiedStrategy::UNCERTAIN); + if (q == QualifiedStrategy::INCOMPATIBLE) { + continue; + } + } + Row row; + for (auto& col : requiredColumns_) { + row.emplace_back(std::move(rowData.at(col))); + } + 
iter_->next(); + return Result(std::move(row)); + } + return Result(); +} + +bool IndexScanNode::checkTTL() { + if (iter_->val().empty() || ttlProps_.first == false) { + return true; + } + auto v = IndexKeyUtils::parseIndexTTL(iter_->val()); + if (CommonUtils::checkDataExpiredForTTL(getSchema().back().get(), + std::move(v), + ttlProps_.second.second, + ttlProps_.second.first)) { + return false; + } + return true; +} + +nebula::cpp2::ErrorCode IndexScanNode::resetIter(PartitionID partId) { + path_->resetPart(partId); + nebula::cpp2::ErrorCode ret = nebula::cpp2::ErrorCode::SUCCEEDED; + if (path_->isRange()) { + auto rangePath = dynamic_cast(path_.get()); + kvstore_->range(spaceId_, partId, rangePath->getStartKey(), rangePath->getEndKey(), &iter_); + } else { + auto prefixPath = dynamic_cast(path_.get()); + ret = kvstore_->prefix(spaceId_, partId, prefixPath->getPrefixKey(), &iter_); + } + return ret; +} + +void IndexScanNode::decodePropFromIndex(folly::StringPiece key, + const Map& colPosMap, + std::vector& values) { + if (colPosMap.empty()) { + return; + } + size_t offset = sizeof(PartitionID) + sizeof(IndexID); + std::bitset<16> nullableBit; + int8_t nullableColPosit = 15; + if (indexNullable_) { + auto bitOffset = key.size() - context_->vIdLen() - sizeof(uint16_t); + auto v = *reinterpret_cast(key.data() + bitOffset); + nullableBit = v; + } + for (auto& field : index_->get_fields()) { + int len = 0; + auto type = IndexKeyUtils::toValueType(field.type.get_type()); + switch (type) { + case Value::Type::BOOL: + len = sizeof(bool); + break; + case Value::Type::INT: + len = sizeof(int64_t); + break; + case Value::Type::FLOAT: + len = sizeof(double); + break; + case Value::Type::STRING: + len = *field.type.get_type_length(); + break; + case Value::Type::TIME: + len = sizeof(int8_t) * 3 + sizeof(int32_t); + break; + case Value::Type::DATE: + len = sizeof(int8_t) * 2 + sizeof(int16_t); + break; + case Value::Type::DATETIME: + len = sizeof(int32_t) + sizeof(int16_t) + 
sizeof(int8_t) * 5; + break; + case Value::Type::GEOGRAPHY: // colPosMap will never need GEOGRAPHY type + len = 8; + break; + default: + LOG(FATAL) << "Unexpect value type:" << int(field.type.get_type()); + } + if (colPosMap.count(field.get_name())) { + if (indexNullable_ && nullableBit.test(nullableColPosit)) { + values[colPosMap.at(field.get_name())] = Value(NullType::__NULL__); + } else { + values[colPosMap.at(field.get_name())] = + IndexKeyUtils::decodeValue(key.subpiece(offset, len), type); + } + } + offset += len; + nullableColPosit -= 1; + } +} + +std::string IndexScanNode::identify() { + return fmt::format("{}(IndexID={}, Path=({}))", name_, indexId_, path_->toString()); +} + +// End of IndexScan +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexScanNode.h b/src/storage/exec/IndexScanNode.h index 4c6f7b2dc79..fe8838aa4f8 100644 --- a/src/storage/exec/IndexScanNode.h +++ b/src/storage/exec/IndexScanNode.h @@ -1,186 +1,432 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* Copyright (c) 2021 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License. */ +#pragma once +#include -#ifndef STORAGE_EXEC_INDEXSCANNODE_H_ -#define STORAGE_EXEC_INDEXSCANNODE_H_ +#include +#include #include "common/base/Base.h" -#include "storage/exec/RelNode.h" -#include "storage/exec/StorageIterator.h" - +#include "common/datatypes/DataSet.h" +#include "common/utils/IndexKeyUtils.h" +#include "interface/gen-cpp2/meta_types.h" +#include "interface/gen-cpp2/storage_types.h" +#include "storage/CommonUtils.h" +#include "storage/exec/IndexNode.h" namespace nebula { namespace storage { -template -class IndexScanNode : public RelNode { - public: - using RelNode::doExecute; +/** + * + * IndexScanNode + * + * reference: IndexNode, IndexVertexScanNode, IndexEdgeScanNode + * + * `IndexScanNode` is the base class of the node which need to access disk. 
It has two derive + * class `IndexVertexScanNode` and `IndexEdgeScanNode` + * + * ┌───────────┐ + * │ IndexNode │ + * └─────┬─────┘ + * │ + * ┌───────┴───────┐ + * │ IndexScanNode │ + * └───────┬───────┘ + * ┌───────────┴────────────┐ + * ┌──────────┴──────────┐ ┌───────────┴─────────┐ + * │ IndexVertexScanNode │ │ IndexEdgeScanNode │ + * └─────────────────────┘ └─────────────────────┘ + * + * `IndexScanNode` will access index data, and then access base data if necessary. + * + * Member: + * `indexId_` : index_ in this Node to access + * `partId_` : part to access.It will be modify while `doExecute` + * `index_` : index defination + * `indexNullable_` : if index contain nullable field or not + * `columnHints_` : + * `path_` : + * `iter_` : current kvstore iterator.It while be reseted `doExecute` and iterated + * during `doNext` + * `kvstore_` : server kvstore + * `requiredColumns_` : row format that `doNext` needs to return + * `requiredAndHintColumns_`: columns that `decodeFromBase` needs to decode + * `ttlProps` : ttl properties `needAccesBase_` : if need + * `fatalOnBaseNotFound_` : for debug + * + * Function: + * `decodePropFromIndex` : decode properties from Index key.It will be called by + * `decodeFromIndex` + * `decodeFromIndex` : decode all column in `requiredColumns_` by index + * key-value. + * `getBaseData` : get key-value of base data `decodeFromBase` : get + * all values that `requiredAndHintColumns_` required + * `checkTTL` : check data is + * expired or not + * ------------------------------------------------------------- + * + * Path + * + * `Path` is the most important part of `IndexScanNode`. By analyzing `ColumnHint`, it obtains + * the mode(Prefix or Range) and range(key of Prefix or [start,end) of Range) of keys that + * `IndexScanNode` need to query in kvstore. 
+ * + * `Path` not only generate the key to access, but also `qualified` whether the key complies with + * the columnhint constraint or not.For example, if there is a truncated string index, we cannot + * simply compare bytes to determine whether the current key complies with the columnhints + * constraint, the result of `qulified(bytes)` should be `UNCERTAIN` and `IndexScanNode` will + * access base data then `Path` reconfirm `ColumnHint` constraint by `qulified(RowData)`. In + * addition to the above examples, there are other cases to deal with.`Path` and it's derive class + * will dynamic different strategy by `ColumnHint`,`IndexItem`,and `Schema`.All strategy will be + * added to `QFList_`(QualifiedFunctionList) during `buildKey`, and executed during `qualified`. + * + * `Path` whild be reseted when `IndexScanNode` execute on a new part. + * + * It should be noted that the range generated by `rangepath` is a certain left included and right + * excluded interval,like [startKey_, endKey_), although `ColumnHint` may have many different + * constraint ranges(e.g., (x, y],(INF,y),(x,INF)). Because the length of index key is fixed, the + * way to obtain **the smallest key greater than 'x'** is to append several '\xFF' after until the + * length of 'x' is greater than the length of the indexkey. 
+ * + * + * Member: + * `QFList_` : all Qualified strategy need to executed during qualified + * `nullable_` : if `index_` contain nullable field, `nullable_[i]` is equal to + * `index_->fields[i].nullable`,else `nullable_` is empty + * `index_nullable_offset_` : Participate in the index key encode diagram + * `totalKeyLength_` : Participate in the index key encode diagram + * `suffixLength_` : Participate in the index key encode diagram + * `serializeString_` : a string express path + * + * Index Key Encode: + * ┌──────┬─────────────┬────────────────┬──────────┬─────────────────────────────────────────┐ + * │ type | PartitionID | Indexed Values | nullable | suffix({vid} or {srcId,rank,dstId}) | + * │ 1byte| 3 bytes | n bytes | 0/2 bytes| vid.length or vid.length*2+sizeof(rank) | + * └──────┴─────────────┴────────────────┴──────────┴─────────────────────────────────────────┘ + * │ └───────────────────┬─────────────────────┘ + * index_nullable_offset_ suffixLength_ + * └──────────────────────────────────┬───────────────────────────────────────────────────────┘ + * totalKeyLength_ + * + * Function: + * `make` : construct `PrefixPath` or `RangePath` according to `hints` + * `qualified(StringPiece)` : qulified key by bytes + * `qualified(Map)` : qulified row by value + * `resetPart` : reset current partitionID and reset `iter_` + * `encodeValue` : encode a Value to bytes + * + * + * ------------------------------------------------------------- + * + * + * + */ + +class Path; +class QualifiedStrategySet; +class IndexScanNode : public IndexNode { + FRIEND_TEST(IndexScanTest, Base); + FRIEND_TEST(IndexScanTest, Vertex); + FRIEND_TEST(IndexScanTest, Edge); + // There are too many unittests, so a helper is defined to access private data + friend class IndexScanTestHelper; + + public: + IndexScanNode(const IndexScanNode& node); IndexScanNode(RuntimeContext* context, + const std::string& name, IndexID indexId, - std::vector columnHints, - int64_t limit = -1) - : 
context_(context), indexId_(indexId), columnHints_(std::move(columnHints)), limit_(limit) { - /** - * columnHints's elements are {scanType = PREFIX|RANGE; beginStr; endStr}, - * {scanType = PREFIX|RANGE; beginStr; - * endStr},... if the scanType is RANGE, means the index scan is range scan. - * if all scanType are PREFIX, means the index scan is prefix scan. - * there should be only one RANGE hnit, and it must be the last one. - */ - for (size_t i = 0; i < columnHints_.size(); i++) { - if (columnHints_[i].get_scan_type() == cpp2::ScanType::RANGE) { - isRangeScan_ = true; - CHECK_EQ(columnHints_.size() - 1, i); - break; - } - } - RelNode::name_ = "IndexScanNode"; - } + const std::vector& columnHints, + ::nebula::kvstore::KVStore* kvstore) + : IndexNode(context, name), indexId_(indexId), columnHints_(columnHints), kvstore_(kvstore) {} + ::nebula::cpp2::ErrorCode init(InitContext& ctx) override; + std::string identify() override; - nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { - auto ret = RelNode::doExecute(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return ret; - } - auto scanRet = scanStr(partId); - if (!scanRet.ok()) { - return nebula::cpp2::ErrorCode::E_INVALID_FIELD_VALUE; - } - scanPair_ = scanRet.value(); - std::unique_ptr iter; - ret = isRangeScan_ ? context_->env()->kvstore_->range( - context_->spaceId(), partId, scanPair_.first, scanPair_.second, &iter) - : context_->env()->kvstore_->prefix( - context_->spaceId(), partId, scanPair_.first, &iter); - if (ret == nebula::cpp2::ErrorCode::SUCCEEDED && iter && iter->valid()) { - context_->isEdge() - ? 
iter_.reset(new EdgeIndexIterator(std::move(iter), context_->vIdLen())) - : iter_.reset(new VertexIndexIterator(std::move(iter), context_->vIdLen())); - } else { - iter_.reset(); - return ret; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; + protected: + nebula::cpp2::ErrorCode doExecute(PartitionID partId) final; + Result doNext() final; + void decodePropFromIndex(folly::StringPiece key, + const Map& colPosMap, + std::vector& values); + virtual Row decodeFromIndex(folly::StringPiece key) = 0; + virtual nebula::cpp2::ErrorCode getBaseData(folly::StringPiece key, + std::pair& kv) = 0; + virtual Map decodeFromBase(const std::string& key, + const std::string& value) = 0; + virtual const std::vector>& getSchema() = 0; + bool checkTTL(); + nebula::cpp2::ErrorCode resetIter(PartitionID partId); + PartitionID partId_; + const IndexID indexId_; + std::shared_ptr index_; + bool indexNullable_ = false; + const std::vector& columnHints_; + std::unique_ptr path_; + std::unique_ptr iter_; + nebula::kvstore::KVStore* kvstore_; + std::vector requiredColumns_; + Set requiredAndHintColumns_; + std::pair> ttlProps_; + bool needAccessBase_{false}; + bool fatalOnBaseNotFound_{false}; + Map colPosMap_; +}; +class QualifiedStrategy { + public: + enum Result { INCOMPATIBLE = 0, UNCERTAIN = 1, COMPATIBLE = 2 }; + /** + * checkNull + * + * There are two overload `checkNull` functions: + * 1. First one which is with template arg `targetIsNull`, checks `columnIndex` at `nullable` + * whether equal to `targetIsNull` or not. + * 2. The other one which is without template, filters key whose `columnIndex` at `nullable` is + * true + * + * Args: + * `columnIndex` : Index of column. **NOTE** , however, that the order in nullable bytes is + * reversed + * `keyOffset` : Reference `Index Key Encode` -> `index_nullable_offset_` + * + * Return: + * For convenience, we define a variable x.When the value at `columnIndex` is null, x is true, + * Otherwise x is false. 
+ * 1.With template.Return COMPATIBLE if `x`==`targetIsNull`,else INCOMPATIBLE + * 2.Without template.Return COMPATIBLE if `x`==false, else INCOMPATIBLE + */ + template + static QualifiedStrategy checkNull(size_t columnIndex, size_t keyOffset) { + QualifiedStrategy q; + q.func_ = [columnIndex, keyOffset](const folly::StringPiece& key) { + std::bitset<16> nullableBit; + auto v = *reinterpret_cast(key.data() + keyOffset); + nullableBit = v; + return nullableBit.test(15 - columnIndex) == targetIsNull ? Result::COMPATIBLE + : Result::INCOMPATIBLE; + }; + return q; } - - IndexIterator* iterator() { return iter_.get(); } - - std::vector moveData() { - auto* sh = context_->isEdge() ? context_->edgeSchema_ : context_->tagSchema_; - auto ttlProp = CommonUtils::ttlProps(sh); - data_.clear(); - int64_t count = 0; - while (!!iter_ && iter_->valid()) { - if (context_->isPlanKilled()) { - return {}; - } - if (!iter_->val().empty() && ttlProp.first) { - auto v = IndexKeyUtils::parseIndexTTL(iter_->val()); - if (CommonUtils::checkDataExpiredForTTL( - sh, std::move(v), ttlProp.second.second, ttlProp.second.first)) { - iter_->next(); - continue; - } - } - data_.emplace_back(iter_->key(), ""); - if (limit_ > 0 && ++count >= limit_) { - break; - } - iter_->next(); - } - return std::move(data_); + static QualifiedStrategy checkNull(size_t columnIndex, size_t keyOffset) { + QualifiedStrategy q; + q.func_ = [columnIndex, keyOffset](const folly::StringPiece& key) { + std::bitset<16> nullableBit; + auto v = *reinterpret_cast(key.data() + keyOffset); + nullableBit = v; + return nullableBit.test(15 - columnIndex) ? Result::INCOMPATIBLE : Result::COMPATIBLE; + }; + return q; } - - private: - StatusOr> scanStr(PartitionID partId) { - auto iRet = context_->isEdge() - ? 
context_->env()->indexMan_->getEdgeIndex(context_->spaceId(), indexId_) - : context_->env()->indexMan_->getTagIndex(context_->spaceId(), indexId_); - if (!iRet.ok()) { - return Status::IndexNotFound(); - } - if (isRangeScan_) { - return getRangeStr(partId, iRet.value()->get_fields()); - } else { - return getPrefixStr(partId, iRet.value()->get_fields()); - } + /** + * checkNaN + * + * Only for double. Check the value at `keyOffset` in indexKey is NaN or not. The logic here needs + * to be coordinated with the encoding logic of double numbers. + * + * Args: + * `keyOffset` : value offset at indexKey + * + * Return: + * Return INCOMPATIBLE if v==Nan else COMPATIBLE; + */ + static QualifiedStrategy checkNaN(size_t keyOffset) { + const char* chr = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"; // '\xFF' * 8 + QualifiedStrategy q; + q.func_ = [chr, keyOffset](const folly::StringPiece& key) { + int ret = memcmp(chr, key.data() + keyOffset, 8); + return ret == 0 ? Result::INCOMPATIBLE : Result::COMPATIBLE; + }; + return q; } - - StatusOr> getPrefixStr( - PartitionID partId, const std::vector<::nebula::meta::cpp2::ColumnDef>& fields) { - std::string prefix; - prefix.append(IndexKeyUtils::indexPrefix(partId, indexId_)); - for (auto& col : columnHints_) { - auto iter = std::find_if(fields.begin(), fields.end(), [col](const auto& field) { - return col.get_column_name() == field.get_name(); - }); - if (iter == fields.end()) { - VLOG(3) << "Field " << col.get_column_name() << " not found "; - return Status::Error("Field not found"); - } - auto type = IndexKeyUtils::toValueType(iter->type.type); - if (type == Value::Type::STRING && !iter->type.type_length_ref().has_value()) { - return Status::Error("String property index has not set prefix length."); - } - prefix.append(encodeValue(*col.begin_value_ref(), type, iter->type.get_type_length())); - } - return std::make_pair(prefix, ""); + /** + * dedupGeoIndex + * + * Because a `GEOGRAPHY` type data will generate multiple index keys pointing to 
the same base + * data,the base data pointed to by the indexkey should be de duplicated. + * + * Args: + * `dedupSuffixLength` : If indexed schema is a tag, `dedupSuffixLength` should be vid.len; + * If the indexed schema is an edge, `dedupSuffixLength` shoule be + * srcId.len+sizeof(rank)+dstId.len + * Return: + * When suffix first appears, the function returns `COMPATIBLE`; otherwise, the function returns + * `INCOMPATIBLE` + */ + static QualifiedStrategy dedupGeoIndex(size_t dedupSuffixLength) { + QualifiedStrategy q; + q.func_ = [suffixSet = Set(), + suffixLength = dedupSuffixLength](const folly::StringPiece& key) mutable -> Result { + std::string suffix = key.subpiece(key.size() - suffixLength, suffixLength).toString(); + auto [iter, result] = suffixSet.insert(std::move(suffix)); + return result ? Result::COMPATIBLE : Result::INCOMPATIBLE; + }; + return q; } - - StatusOr> getRangeStr( - PartitionID partId, const std::vector<::nebula::meta::cpp2::ColumnDef>& fields) { - std::string start, end; - start.append(IndexKeyUtils::indexPrefix(partId, indexId_)); - end.append(IndexKeyUtils::indexPrefix(partId, indexId_)); - for (auto& col : columnHints_) { - auto iter = std::find_if(fields.begin(), fields.end(), [col](const auto& field) { - return col.get_column_name() == field.get_name(); - }); - if (iter == fields.end()) { - VLOG(3) << "Field " << col.get_column_name() << " not found "; - return Status::Error("Field not found"); - } - auto type = IndexKeyUtils::toValueType(iter->get_type().get_type()); - if (type == Value::Type::STRING && !iter->get_type().type_length_ref().has_value()) { - return Status::Error("String property index has not set prefix length."); - } - if (col.get_scan_type() == cpp2::ScanType::PREFIX) { - start.append(encodeValue(*col.begin_value_ref(), type, iter->type.get_type_length())); - end.append(encodeValue(*col.begin_value_ref(), type, iter->type.get_type_length())); + /** + * constant + * + * Always return `result` + */ + template + static 
QualifiedStrategy constant() { + QualifiedStrategy q; + q.func_ = [](const folly::StringPiece&) { return result; }; + return q; + } + /** + * compareTruncated + * + * For a `String` type index, `val` may be truncated, and it is not enough to determine whether + * the indexkey complies with the constraint of columnhint only through the interval limit of + * [start,end) which is generated by `RangeIndex`. Therefore, it is necessary to make additional + * judgment on the truncated string type index + * For example: + * (ab)c meas that string is "abc" but index val has been truncated to "ab". (ab)c > ab is + * `UNCERTAIN`, and (ab)c > aa is COMPATIBLE. + * + * Args: + * `LEorGE` : It's an assit arg. true means LE and false means GE. + * `val` : Truncated `String` index value,whose length has been define in `IndexItem`. + * `keyStartPos` : The position in indexKey where start compare with `val` + * + * Return: + * Return `COMPATIBLE` if `val` is `LEorGE` than indexKey.Otherwise, return `UNCERTAIN`. + */ + template + static QualifiedStrategy compareTruncated(const std::string& val, size_t keyStartPos) { + QualifiedStrategy q; + q.func_ = [val, keyStartPos](const folly::StringPiece& key) { + int ret = memcmp(val.data(), key.data() + keyStartPos, val.size()); + if constexpr (LEorGE == true) { + CHECK_LE(ret, 0); } else { - start.append(encodeValue(*col.begin_value_ref(), type, iter->type.get_type_length())); - end.append(encodeValue(*col.end_value_ref(), type, iter->type.get_type_length())); + CHECK_GE(ret, 0); } - } - return std::make_pair(start, end); + return ret == 0 ? 
Result::UNCERTAIN : Result::COMPATIBLE; + }; + return q; } + // call + inline Result operator()(const folly::StringPiece& key); - // precondition: if type is STRING, strLen must be valid - std::string encodeValue(const Value& val, Value::Type type, const int16_t* strLen) { - if (val.isNull()) { - return IndexKeyUtils::encodeNullValue(type, strLen); - } - if (type == Value::Type::STRING) { - return IndexKeyUtils::encodeValue(val, *strLen); - } else { - return IndexKeyUtils::encodeValue(val); - } - } + private: + std::function func_; +}; +class QualifiedStrategySet { + public: + inline void insert(QualifiedStrategy&& strategy); + inline QualifiedStrategy::Result operator()(const folly::StringPiece& key); + + private: + std::vector strategyList_; +}; + +class Path { + public: + // enum class Qualified : int16_t { INCOMPATIBLE = 0, UNCERTAIN = 1, COMPATIBLE = 2 }; + // using QualifiedFunction = std::function; + using ColumnTypeDef = ::nebula::meta::cpp2::ColumnTypeDef; + Path(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen); + virtual ~Path() = default; + + static std::unique_ptr make(::nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen); + QualifiedStrategy::Result qualified(const folly::StringPiece& key); + virtual bool isRange() { return false; } + + virtual QualifiedStrategy::Result qualified(const Map& rowData) = 0; + virtual void resetPart(PartitionID partId) = 0; + const std::string& toString(); + + protected: + std::string encodeValue(const Value& value, + const ColumnTypeDef& colDef, + size_t index, + std::string& key); + QualifiedStrategySet strategySet_; + ::nebula::meta::cpp2::IndexItem* index_; + const meta::SchemaProviderIf* schema_; + const std::vector hints_; + std::vector nullable_; + int64_t index_nullable_offset_{8}; + int64_t totalKeyLength_{8}; + int64_t suffixLength_; + std::string serializeString_; +}; 
+class PrefixPath : public Path { + public: + PrefixPath(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen); + // Override + QualifiedStrategy::Result qualified(const Map& rowData) override; + void resetPart(PartitionID partId) override; + + const std::string& getPrefixKey() { return prefix_; } private: - RuntimeContext* context_; - IndexID indexId_; - bool isRangeScan_{false}; - std::unique_ptr iter_; - std::pair scanPair_; - std::vector columnHints_; - int64_t limit_; - std::vector data_; + std::string prefix_; + void buildKey(); }; +class RangePath : public Path { + public: + RangePath(nebula::meta::cpp2::IndexItem* index, + const meta::SchemaProviderIf* schema, + const std::vector& hints, + int64_t vidLen); + QualifiedStrategy::Result qualified(const Map& rowData) override; + void resetPart(PartitionID partId) override; + + inline bool includeStart() { return includeStart_; } + inline bool includeEnd() { return includeEnd_; } + inline const std::string& getStartKey() { return startKey_; } + inline const std::string& getEndKey() { return endKey_; } + bool isRange() override { return true; } + + private: + std::string startKey_, endKey_; + bool includeStart_ = true; + bool includeEnd_ = false; + + void buildKey(); + std::tuple encodeRange( + const cpp2::IndexColumnHint& hint, + const nebula::meta::cpp2::ColumnTypeDef& colTypeDef, + size_t colIndex, + size_t offset); + inline std::string encodeString(const Value& value, size_t len, bool& truncated); + inline std::string encodeFloat(const Value& value, bool& isNaN); + std::string encodeBeginValue(const Value& value, + const ColumnTypeDef& colDef, + std::string& key, + size_t offset); + std::string encodeEndValue(const Value& value, + const ColumnTypeDef& colDef, + std::string& key, + size_t offset); +}; +/* define inline functions */ +QualifiedStrategy::Result QualifiedStrategySet::operator()(const folly::StringPiece& key) { + 
QualifiedStrategy::Result ret = QualifiedStrategy::COMPATIBLE; + for (auto& s : strategyList_) { + ret = std::min(ret, s(key)); + } + return ret; +} +void QualifiedStrategySet::insert(QualifiedStrategy&& strategy) { + strategyList_.emplace_back(std::move(strategy)); +} +inline QualifiedStrategy::Result QualifiedStrategy::operator()(const folly::StringPiece& key) { + return func_(key); +} } // namespace storage -} // namespace nebula -#endif // STORAGE_EXEC_INDEXSCANNODE_H_ +} // namespace nebula diff --git a/src/storage/exec/IndexSelectionNode.cpp b/src/storage/exec/IndexSelectionNode.cpp new file mode 100644 index 00000000000..b69e6bbfcae --- /dev/null +++ b/src/storage/exec/IndexSelectionNode.cpp @@ -0,0 +1,78 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#include "storage/exec/IndexSelectionNode.h" +namespace nebula { +namespace storage { +IndexSelectionNode::IndexSelectionNode(const IndexSelectionNode& node) + : IndexNode(node), expr_(node.expr_), colPos_(node.colPos_) { + ctx_ = std::make_unique(colPos_); +} + +IndexSelectionNode::IndexSelectionNode(RuntimeContext* context, Expression* expr) + : IndexNode(context, "IndexSelectionNode"), expr_(expr) {} +nebula::cpp2::ErrorCode IndexSelectionNode::init(InitContext& ctx) { + DCHECK_EQ(children_.size(), 1); + SelectionExprVisitor vis; + expr_->accept(&vis); + for (auto& col : vis.getRequiredColumns()) { + ctx.requiredColumns.insert(col); + } + auto ret = children_[0]->init(ctx); + if (UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + for (auto& col : vis.getRequiredColumns()) { + colPos_[col] = ctx.retColMap.at(col); + } + ctx_ = std::make_unique(colPos_); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} + +IndexNode::Result IndexSelectionNode::doNext() { + DCHECK_EQ(children_.size(), 1); + auto& child = *children_[0]; + do { + auto result = child.next(); + if (!result.hasData()) { + return result; + } + 
if (filter(result.row())) { + return result; + } + } while (true); + return Result(); +} + +std::unique_ptr IndexSelectionNode::copy() { + return std::make_unique(*this); +} + +std::string IndexSelectionNode::identify() { + return fmt::format("{}(expr=[{}])", name_, expr_->toString()); +} + +Value IndexSelectionNode::ExprContext::getEdgeProp(const std::string& edgeType, + const std::string& prop) const { + UNUSED(edgeType); + DCHECK(row_ != nullptr); + auto iter = colPos_.find(prop); + DCHECK(iter != colPos_.end()); + DCHECK(iter->second < row_->size()); + return (*row_)[iter->second]; +} + +Value IndexSelectionNode::ExprContext::getTagProp(const std::string& tag, + const std::string& prop) const { + UNUSED(tag); + DCHECK(row_ != nullptr); + auto iter = colPos_.find(prop); + DCHECK(iter != colPos_.end()); + DCHECK(iter->second < row_->size()); + return (*row_)[iter->second]; +} + +} // namespace storage + +} // namespace nebula diff --git a/src/storage/exec/IndexSelectionNode.h b/src/storage/exec/IndexSelectionNode.h new file mode 100644 index 00000000000..a134ecf890d --- /dev/null +++ b/src/storage/exec/IndexSelectionNode.h @@ -0,0 +1,136 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#pragma once + +#include "common/context/ExpressionContext.h" +#include "common/expression/Expression.h" +#include "folly/container/F14Map.h" +#include "storage/ExprVisitorBase.h" +#include "storage/exec/IndexNode.h" +namespace nebula { +namespace storage { +/** + * + * IndexSelectionNode + * + * reference: IndexNode + * + * `IndexSelectionNode` is the class which is used to filter data by given expression in RPC + * request. 
+ * ┌───────────┐ + * │ IndexNode │ + * └─────┬─────┘ + * │ + * ┌─────────┴──────────┐ + * │ IndexSelectionNode │ + * └────────────────────┘ + * Member: + * `expr_` : expression used to filter + * `colPos_`: column's position in Row which is during eval `expr_` + * `ctx_` : used to eval expression + * Function: + * `filter` : compute `expr_` + * + * + * ------------------------------------------------------------------------------------------------ + * IndexSelectionNode::ExprContext + * + * `ExprContext` is a derive class of ExpressionContext which is needed in eval expression. + * NOTICE: There are many node in the entire storage plan tree where expressions need to be + * evaluated(e.g., Projection,Aggregate,etc.). So `ExprContext` may be not an internal + * class of IndexSelectionNode. + */ +class IndexSelectionNode : public IndexNode { + public: + IndexSelectionNode(const IndexSelectionNode &node); + IndexSelectionNode(RuntimeContext *context, Expression *expr); + nebula::cpp2::ErrorCode init(InitContext &ctx) override; + std::unique_ptr copy() override; + std::string identify() override; + + private: + Result doNext() override; + inline bool filter(const Row &row) { + ctx_->setRow(row); + auto &result = expr_->eval(*ctx_); + return result.type() == Value::Type::BOOL ? 
result.getBool() : false; + } + Expression *expr_; + Map colPos_; + // TODO(hs.zhang): `ExprContext` could be moved out later if we unify the valcano in go/lookup + class ExprContext : public ExpressionContext { + public: + explicit ExprContext(const Map &colPos) : colPos_(colPos) {} + void setRow(const Row &row) { row_ = &row; } + Value getEdgeProp(const std::string &edgeType, const std::string &prop) const override; + Value getTagProp(const std::string &tag, const std::string &prop) const override; + // override + const Value &getVar(const std::string &var) const override { + UNUSED(var); + return fatal(__FILE__, __LINE__); + } + const Value &getVersionedVar(const std::string &var, int64_t version) const override { + UNUSED(var), UNUSED(version); + return fatal(__FILE__, __LINE__); + } + const Value &getVarProp(const std::string &var, const std::string &prop) const override { + UNUSED(var), UNUSED(prop); + return fatal(__FILE__, __LINE__); + } + Value getSrcProp(const std::string &tag, const std::string &prop) const override { + UNUSED(tag), UNUSED(prop); + return fatal(__FILE__, __LINE__); + } + const Value &getDstProp(const std::string &tag, const std::string &prop) const override { + UNUSED(tag), UNUSED(prop); + return fatal(__FILE__, __LINE__); + } + const Value &getInputProp(const std::string &prop) const override { + UNUSED(prop); + return fatal(__FILE__, __LINE__); + } + Value getVertex(const std::string &) const override { return fatal(__FILE__, __LINE__); } + Value getEdge() const override { return fatal(__FILE__, __LINE__); } + Value getColumn(int32_t index) const override { + UNUSED(index); + return fatal(__FILE__, __LINE__); + } + void setVar(const std::string &var, Value val) override { + UNUSED(var), UNUSED(val); + fatal(__FILE__, __LINE__); + } + + private: + const Map &colPos_; + const Row *row_; + inline const Value &fatal(const std::string &file, int line) const { + LOG(FATAL) << "Unexpect at " << file << ":" << line; + static Value placeholder; 
+ return placeholder; + } + }; + std::unique_ptr ctx_; +}; + +class SelectionExprVisitor : public ExprVisitorBase { + public: + void visit(EdgeSrcIdExpression *expr) override { requiredColumns_.insert(expr->prop()); } + void visit(EdgeTypeExpression *expr) override { requiredColumns_.insert(expr->prop()); } + void visit(EdgeRankExpression *expr) override { requiredColumns_.insert(expr->prop()); } + void visit(EdgeDstIdExpression *expr) override { requiredColumns_.insert(expr->prop()); } + void visit(TagPropertyExpression *expr) override { requiredColumns_.insert(expr->prop()); } + void visit(EdgePropertyExpression *expr) override { requiredColumns_.insert(expr->prop()); } + const Set &getRequiredColumns() { return requiredColumns_; } + ::nebula::cpp2::ErrorCode getCode() { return code_; } + + private: + using ExprVisitorBase::visit; + Set requiredColumns_; + ::nebula::cpp2::ErrorCode code_; +}; + +} // namespace storage + +} // namespace nebula diff --git a/src/storage/exec/IndexVertexNode.h b/src/storage/exec/IndexVertexNode.h deleted file mode 100644 index 7b2cae38e4b..00000000000 --- a/src/storage/exec/IndexVertexNode.h +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ -#ifndef STORAGE_EXEC_INDEXVERTEXNODE_H_ -#define STORAGE_EXEC_INDEXVERTEXNODE_H_ - -#include "common/base/Base.h" -#include "storage/exec/IndexScanNode.h" -#include "storage/exec/RelNode.h" - -namespace nebula { -namespace storage { - -template -class IndexVertexNode final : public RelNode { - public: - using RelNode::doExecute; - - IndexVertexNode(RuntimeContext* context, - IndexScanNode* indexScanNode, - const std::vector>& schemas, - const std::string& schemaName, - int64_t limit = -1) - : context_(context), - indexScanNode_(indexScanNode), - schemas_(schemas), - schemaName_(schemaName), - limit_(limit) { - RelNode::name_ = "IndexVertexNode"; - } - - nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { - auto ret = RelNode::doExecute(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return ret; - } - - auto ttlProp = CommonUtils::ttlProps(context_->tagSchema_); - - data_.clear(); - std::vector vids; - auto* iter = static_cast(indexScanNode_->iterator()); - - while (iter && iter->valid()) { - if (context_->isPlanKilled()) { - return nebula::cpp2::ErrorCode::E_PLAN_IS_KILLED; - } - if (!iter->val().empty() && ttlProp.first) { - auto v = IndexKeyUtils::parseIndexTTL(iter->val()); - if (CommonUtils::checkDataExpiredForTTL( - context_->tagSchema_, std::move(v), ttlProp.second.second, ttlProp.second.first)) { - iter->next(); - continue; - } - } - vids.emplace_back(iter->vId()); - iter->next(); - } - int64_t count = 0; - for (const auto& vId : vids) { - VLOG(1) << "partId " << partId << ", vId " << vId << ", tagId " << context_->tagId_; - auto key = NebulaKeyUtils::vertexKey(context_->vIdLen(), partId, vId, context_->tagId_); - std::string val; - ret = context_->env()->kvstore_->get(context_->spaceId(), partId, key, &val); - if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { - data_.emplace_back(std::move(key), std::move(val)); - } else if (ret == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { - continue; - } else { - return ret; - } - if 
(limit_ > 0 && ++count >= limit_) { - break; - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - std::vector moveData() { return std::move(data_); } - - const std::vector>& getSchemas() { - return schemas_; - } - - const std::string& getSchemaName() { return schemaName_; } - - private: - RuntimeContext* context_; - IndexScanNode* indexScanNode_; - const std::vector>& schemas_; - const std::string& schemaName_; - int64_t limit_; - std::vector data_; -}; - -} // namespace storage -} // namespace nebula -#endif // STORAGE_EXEC_INDEXVERTEXNODE_H_ diff --git a/src/storage/exec/IndexVertexScanNode.cpp b/src/storage/exec/IndexVertexScanNode.cpp new file mode 100644 index 00000000000..499a4d59d8a --- /dev/null +++ b/src/storage/exec/IndexVertexScanNode.cpp @@ -0,0 +1,117 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ +#include "storage/exec/IndexVertexScanNode.h" + +#include "codec/RowReaderWrapper.h" +#include "common/utils/NebulaKeyUtils.h" +#include "storage/exec/QueryUtils.h" +namespace nebula { +namespace storage { + +IndexVertexScanNode::IndexVertexScanNode(const IndexVertexScanNode& node) + : IndexScanNode(node), tag_(node.tag_) {} + +IndexVertexScanNode::IndexVertexScanNode(RuntimeContext* context, + IndexID indexId, + const std::vector& clolumnHint, + ::nebula::kvstore::KVStore* kvstore) + : IndexScanNode(context, "IndexVertexScanNode", indexId, clolumnHint, kvstore) { + getIndex = std::function([this](std::shared_ptr& index) { + auto env = this->context_->env(); + auto indexMgr = env->indexMan_; + auto indexVal = indexMgr->getTagIndex(this->spaceId_, this->indexId_); + if (!indexVal.ok()) { + return ::nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND; + } + index = indexVal.value(); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }); + getTag = std::function([this](TagSchemas& tag) { + auto env = this->context_->env(); + auto schemaMgr = env->schemaMan_; + auto allSchema = 
schemaMgr->getAllVerTagSchema(this->spaceId_); + auto tagId = this->index_->get_schema_id().get_tag_id(); + if (!allSchema.ok() || !allSchema.value().count(tagId)) { + return ::nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; + } + tag = allSchema.value().at(tagId); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }); +} + +::nebula::cpp2::ErrorCode IndexVertexScanNode::init(InitContext& ctx) { + if (auto ret = getIndex(this->index_); UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + if (auto ret = getTag(tag_); UNLIKELY(ret != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return ret; + } + return IndexScanNode::init(ctx); +} + +nebula::cpp2::ErrorCode IndexVertexScanNode::getBaseData(folly::StringPiece key, + std::pair& kv) { + kv.first = NebulaKeyUtils::vertexKey(context_->vIdLen(), + partId_, + key.subpiece(key.size() - context_->vIdLen()).toString(), + context_->tagId_); + return kvstore_->get(context_->spaceId(), partId_, kv.first, &kv.second); +} + +Row IndexVertexScanNode::decodeFromIndex(folly::StringPiece key) { + std::vector values(requiredColumns_.size()); + if (colPosMap_.count(kVid)) { + auto vId = IndexKeyUtils::getIndexVertexID(context_->vIdLen(), key); + if (context_->isIntId()) { + values[colPosMap_[kVid]] = Value(*reinterpret_cast(vId.data())); + } else { + values[colPosMap_[kVid]] = Value(vId.subpiece(0, vId.find_first_of('\0')).toString()); + } + } + if (colPosMap_.count(kTag)) { + values[colPosMap_[kTag]] = Value(context_->tagId_); + } + key.subtract(context_->vIdLen()); + decodePropFromIndex(key, colPosMap_, values); + return Row(std::move(values)); +} + +Map IndexVertexScanNode::decodeFromBase(const std::string& key, + const std::string& value) { + Map values; + auto reader = RowReaderWrapper::getRowReader(tag_, folly::StringPiece(value)); + for (auto& col : requiredAndHintColumns_) { + switch (QueryUtils::toReturnColType(col)) { + case QueryUtils::ReturnColType::kVid: { + auto vId = 
NebulaKeyUtils::getVertexId(context_->vIdLen(), key); + if (context_->isIntId()) { + values[col] = Value(*reinterpret_cast(vId.data())); + } else { + values[col] = Value(vId.subpiece(0, vId.find_first_of('\0')).toString()); + } + } break; + case QueryUtils::ReturnColType::kTag: { + values[col] = Value(context_->tagId_); + } break; + case QueryUtils::ReturnColType::kOther: { + auto retVal = QueryUtils::readValue(reader.get(), col, tag_.back()->field(col)); + if (!retVal.ok()) { + LOG(FATAL) << "Bad value for field" << col; + } + values[col] = std::move(retVal.value()); + } break; + default: + LOG(FATAL) << "Unexpect column name:" << col; + } + } + return values; +} + +std::unique_ptr IndexVertexScanNode::copy() { + return std::make_unique(*this); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/IndexVertexScanNode.h b/src/storage/exec/IndexVertexScanNode.h new file mode 100644 index 00000000000..fea56a19adb --- /dev/null +++ b/src/storage/exec/IndexVertexScanNode.h @@ -0,0 +1,57 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ +#pragma once + +#include + +#include +#include + +#include "common/base/Base.h" +#include "storage/exec/IndexScanNode.h" +#include "storage/exec/StorageIterator.h" + +namespace nebula { +namespace storage { + +/** + * IndexVertexScanNode + * + * reference: IndexScanNode + */ +class IndexVertexScanNode final : public IndexScanNode { + public: + IndexVertexScanNode(const IndexVertexScanNode& node); + IndexVertexScanNode(RuntimeContext* context, + IndexID indexId, + const std::vector& clolumnHint, + ::nebula::kvstore::KVStore* kvstore); + ::nebula::cpp2::ErrorCode init(InitContext& ctx) override; + std::unique_ptr copy() override; + + private: + nebula::cpp2::ErrorCode getBaseData(folly::StringPiece key, + std::pair& kv) override; + Row decodeFromIndex(folly::StringPiece key) override; + Map decodeFromBase(const std::string& key, const std::string& value) override; + + using TagSchemas = std::vector>; + const TagSchemas& getSchema() override { return tag_; } + TagSchemas tag_; + using IndexItem = ::nebula::meta::cpp2::IndexItem; + // Convenient for testing + std::function<::nebula::cpp2::ErrorCode(std::shared_ptr&)> getIndex; + std::function<::nebula::cpp2::ErrorCode(TagSchemas&)> getTag; + + FRIEND_TEST(IndexScanTest, VertexIndexOnlyScan); + FRIEND_TEST(IndexScanTest, VertexBase); + FRIEND_TEST(IndexScanTest, Prefix1); + FRIEND_TEST(IndexScanTest, Prefix2); + FRIEND_TEST(IndexScanTest, Base); + FRIEND_TEST(IndexScanTest, Vertex); + friend class IndexScanTestHelper; +}; +} // namespace storage +} // namespace nebula diff --git a/src/storage/exec/StorageIterator.h b/src/storage/exec/StorageIterator.h index 49821069f00..c4fc30d456b 100644 --- a/src/storage/exec/StorageIterator.h +++ b/src/storage/exec/StorageIterator.h @@ -6,11 +6,11 @@ #ifndef STORAGE_EXEC_STORAGEITERATOR_H_ #define STORAGE_EXEC_STORAGEITERATOR_H_ +#include "codec/RowReaderWrapper.h" #include "common/base/Base.h" #include "kvstore/KVIterator.h" #include "storage/CommonUtils.h" #include 
"storage/StorageFlags.h" - namespace nebula { namespace storage { diff --git a/src/storage/index/LookupBaseProcessor-inl.h b/src/storage/index/LookupBaseProcessor-inl.h deleted file mode 100644 index 2de86f3b1af..00000000000 --- a/src/storage/index/LookupBaseProcessor-inl.h +++ /dev/null @@ -1,472 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#pragma once - -#include "LookupBaseProcessor.h" -#include "folly/container/Enumerate.h" -namespace nebula { -namespace storage { - -template -nebula::cpp2::ErrorCode LookupBaseProcessor::requestCheck( - const cpp2::LookupIndexRequest& req) { - spaceId_ = req.get_space_id(); - auto retCode = this->getSpaceVidLen(spaceId_); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - return retCode; - } - this->planContext_ = std::make_unique( - this->env_, spaceId_, this->spaceVidLen_, this->isIntId_, req.common_ref()); - const auto& indices = req.get_indices(); - const auto& schemaId = indices.get_schema_id(); - this->planContext_->isEdge_ = schemaId.getType() == nebula::cpp2::SchemaID::Type::edge_type; - this->context_ = std::make_unique(this->planContext_.get()); - if (context_->isEdge()) { - context_->edgeType_ = schemaId.get_edge_type(); - auto edgeName = this->env_->schemaMan_->toEdgeName(spaceId_, context_->edgeType_); - if (!edgeName.ok()) { - return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; - } - context_->edgeName_ = std::move(edgeName.value()); - auto allEdges = this->env_->schemaMan_->getAllVerEdgeSchema(spaceId_); - if (!allEdges.ok()) { - return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; - } - if (!allEdges.value().count(context_->edgeType_)) { - return nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; - } - schemas_ = std::move(allEdges).value()[context_->edgeType_]; - context_->edgeSchema_ = schemas_.back().get(); - } else { - context_->tagId_ = schemaId.get_tag_id(); - auto tagName = this->env_->schemaMan_->toTagName(spaceId_, 
context_->tagId_); - if (!tagName.ok()) { - return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; - } - context_->tagName_ = std::move(tagName.value()); - auto allTags = this->env_->schemaMan_->getAllVerTagSchema(spaceId_); - if (!allTags.ok()) { - return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; - } - if (!allTags.value().count(context_->tagId_)) { - return nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; - } - schemas_ = std::move(allTags).value()[context_->tagId_]; - context_->tagSchema_ = schemas_.back().get(); - } - - if (indices.get_contexts().empty() || !req.return_columns_ref().has_value() || - (*req.return_columns_ref()).empty()) { - return nebula::cpp2::ErrorCode::E_INVALID_OPERATION; - } - indexContexts_ = indices.get_contexts(); - - // setup yield columns. - if (req.return_columns_ref().has_value()) { - yieldCols_ = *req.return_columns_ref(); - } - - for (auto&& it : folly::enumerate(yieldCols_)) { - resultDataSet_.colNames.emplace_back(*it); - if (QueryUtils::toReturnColType(*it) != QueryUtils::ReturnColType::kOther) { - deDupColPos_.emplace_back(it.index); - } - } - - // limit - if (req.limit_ref().has_value()) { - if (*req.limit_ref() < 0) { - LOG(ERROR) << "Incorrect parameter : LIMIT = " << *req.limit_ref(); - return nebula::cpp2::ErrorCode::E_INVALID_PARM; - } - limit_ = *req.limit_ref(); - } - - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -template -bool LookupBaseProcessor::isOutsideIndex(Expression* filter, - const meta::cpp2::IndexItem* index) { - static const std::set propsInEdgeKey{kSrc, kType, kRank, kDst}; - auto fields = index->get_fields(); - switch (filter->kind()) { - case Expression::Kind::kLogicalOr: - case Expression::Kind::kLogicalAnd: { - auto* lExpr = static_cast(filter); - for (auto& expr : lExpr->operands()) { - auto ret = isOutsideIndex(expr, index); - if (ret) { - return ret; - } - } - break; - } - case Expression::Kind::kRelLE: - case Expression::Kind::kRelIn: - case Expression::Kind::kRelGE: - case Expression::Kind::kRelEQ: - case 
Expression::Kind::kRelLT: - case Expression::Kind::kRelGT: - case Expression::Kind::kRelNE: - case Expression::Kind::kRelNotIn: { - auto* rExpr = static_cast(filter); - auto ret = isOutsideIndex(rExpr->left(), index); - if (ret) { - return ret; - } - ret = isOutsideIndex(rExpr->right(), index); - if (ret) { - return ret; - } - break; - } - case Expression::Kind::kEdgeSrc: - case Expression::Kind::kEdgeType: - case Expression::Kind::kEdgeRank: - case Expression::Kind::kEdgeDst: { - auto* sExpr = static_cast(filter); - auto propName = sExpr->prop(); - return propsInEdgeKey.find(propName) == propsInEdgeKey.end(); - } - case Expression::Kind::kTagProperty: - case Expression::Kind::kEdgeProperty: { - auto* sExpr = static_cast(filter); - auto propName = sExpr->prop(); - auto it = std::find_if(fields.begin(), fields.end(), [&propName](const auto& f) { - return f.get_name() == propName; - }); - return it == fields.end(); - } - default: { - return false; - } - } - return false; -} - -/** - * lookup plan should be : - * +--------+---------+ - * | Plan | - * +--------+---------+ - * | - * +--------+---------+ - * | DeDupNode | - * +--------+---------+ - * | - * +----------+-----------+ - * + IndexOutputNode... + - * +----------+-----------+ - **/ - -template -StatusOr> LookupBaseProcessor::buildPlan( - IndexFilterItem* filterItem, nebula::DataSet* result) { - StoragePlan plan; - // TODO(sky) : Limit is not supported yet for de-dup node. - // Related to paging scan, the de-dup execution plan needs to be refactored - auto deDup = std::make_unique>(result, deDupColPos_); - int32_t filterId = 0; - std::unique_ptr> out; - auto pool = &planContext_->objPool_; - - for (const auto& ctx : indexContexts_) { - const auto& indexId = ctx.get_index_id(); - auto needFilter = ctx.filter_ref().is_set() && !(*ctx.filter_ref()).empty(); - - // Check whether a data node is required. 
- // If a non-indexed column appears in the WHERE clause or YIELD clause, - // That means need to query the corresponding data. - bool needData = false; - auto index = context_->isEdge() ? this->env_->indexMan_->getEdgeIndex(spaceId_, indexId) - : this->env_->indexMan_->getTagIndex(spaceId_, indexId); - if (!index.ok()) { - return Status::IndexNotFound(); - } - - // check nullable column - bool hasNullableCol = false; - - auto* indexItem = index.value().get(); - auto fields = indexItem->get_fields(); - - for (const auto& col : fields) { - if (!hasNullableCol && col.nullable_ref().value_or(false)) { - hasNullableCol = true; - break; - } - } - - for (const auto& yieldCol : yieldCols_) { - static const std::set propsInKey{kVid, kTag, kSrc, kType, kRank, kDst}; - if (propsInKey.count(yieldCol)) { - continue; - } - auto it = std::find_if(fields.begin(), fields.end(), [&yieldCol](const auto& columnDef) { - return yieldCol == columnDef.get_name(); - }); - if (it == fields.end() || - it->get_type().get_type() == - nebula::cpp2::PropertyType::GEOGRAPHY) { // geography index just stores - // S2CellId, so must read the - // original geo data. 
- needData = true; - break; - } - } - auto colHints = ctx.get_column_hints(); - - // Check WHERE clause contains columns that ware not indexed - if (ctx.filter_ref().is_set() && !(*ctx.filter_ref()).empty()) { - auto filter = Expression::decode(pool, *ctx.filter_ref()); - auto isFieldsOutsideIndex = isOutsideIndex(filter, indexItem); - if (isFieldsOutsideIndex) { - needData = needFilter = true; - } - } - - if (!needData && !needFilter) { - out = buildPlanBasic(result, ctx, plan, hasNullableCol, fields); - } else if (needData && !needFilter) { - out = buildPlanWithData(result, ctx, plan); - } else if (!needData && needFilter) { - auto expr = Expression::decode(pool, ctx.get_filter()); - auto exprCtx = std::make_unique( - context_->vIdLen(), context_->isIntId(), hasNullableCol, fields); - filterItem->emplace(filterId, std::make_pair(std::move(exprCtx), expr)); - out = buildPlanWithFilter( - result, ctx, plan, (*filterItem)[filterId].first.get(), (*filterItem)[filterId].second); - filterId++; - } else { - auto expr = Expression::decode(pool, ctx.get_filter()); - // Need to get columns in data, expr ctx need to be aware of schema - const auto& schemaName = context_->isEdge() ? 
context_->edgeName_ : context_->tagName_; - if (schemas_.empty()) { - return Status::Error("Schema not found"); - } - auto exprCtx = std::make_unique(context_->vIdLen(), - context_->isIntId(), - schemaName, - schemas_.back().get(), - context_->isEdge()); - filterItem->emplace(filterId, std::make_pair(std::move(exprCtx), expr)); - out = buildPlanWithDataAndFilter( - result, ctx, plan, (*filterItem)[filterId].first.get(), (*filterItem)[filterId].second); - filterId++; - } - if (out == nullptr) { - return Status::Error("Index scan plan error"); - } - deDup->addDependency(out.get()); - plan.addNode(std::move(out)); - } - plan.addNode(std::move(deDup)); - return plan; -} - -/** - * - * +----------+-----------+ - * + IndexOutputNode + - * +----------+-----------+ - * | - * +----------+-----------+ - * + IndexScanNode + - * +----------+-----------+ - * - * If this is a simple index scan, Just having IndexScanNode is enough. for - *example : tag (c1, c2, c3) index on tag (c1, c2, c3) hint : lookup index where - *c1 == 1 and c2 == 1 and c3 == 1 yield c1,c2,c3 - **/ -template -std::unique_ptr> LookupBaseProcessor::buildPlanBasic( - nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan, - bool hasNullableCol, - const std::vector& fields) { - auto indexId = ctx.get_index_id(); - auto colHints = ctx.get_column_hints(); - auto indexScan = std::make_unique>( - context_.get(), indexId, std::move(colHints), limit_); - - auto output = std::make_unique>( - result, context_.get(), indexScan.get(), hasNullableCol, fields); - output->addDependency(indexScan.get()); - plan.addNode(std::move(indexScan)); - return output; -} - -/** - * - * +----------+-----------+ - * + IndexOutputNode + - * +----------+-----------+ - * | - * +----------------+-----------------+ - * + IndexEdgeNode or IndexVertexNode + - * +----------------+-----------------+ - * | - * +----------+-----------+ - * + IndexScanNode + - * +----------+-----------+ - * - * If a non-indexed column 
appears in the YIELD clause, and no expression - *filtering is required . for example : tag (c1, c2, c3) index on tag (c1, c2) - * hint : lookup index where c1 == 1 and c2 == 1 yield c3 - **/ -template -std::unique_ptr> LookupBaseProcessor::buildPlanWithData( - nebula::DataSet* result, const cpp2::IndexQueryContext& ctx, StoragePlan& plan) { - auto indexId = ctx.get_index_id(); - auto colHints = ctx.get_column_hints(); - - auto indexScan = - std::make_unique>(context_.get(), indexId, std::move(colHints)); - if (context_->isEdge()) { - auto edge = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->edgeName_, limit_); - edge->addDependency(indexScan.get()); - auto output = std::make_unique>(result, context_.get(), edge.get()); - output->addDependency(edge.get()); - plan.addNode(std::move(indexScan)); - plan.addNode(std::move(edge)); - return output; - } else { - auto vertex = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->tagName_, limit_); - vertex->addDependency(indexScan.get()); - auto output = std::make_unique>(result, context_.get(), vertex.get()); - output->addDependency(vertex.get()); - plan.addNode(std::move(indexScan)); - plan.addNode(std::move(vertex)); - return output; - } -} - -/** - * - * +----------+-----------+ - * + IndexOutputNode + - * +----------+-----------+ - * | - * +----------+-----------+ - * + IndexFilterNode + - * +----------+-----------+ - * | - * +----------+-----------+ - * + IndexScanNode + - * +----------+-----------+ - * - * If have not non-indexed column appears in the YIELD clause, and expression - *filtering is required . 
for example : tag (c1, c2, c3) index on tag (c1, c2) - * hint : lookup index where c1 > 1 and c2 > 1 - **/ -template -std::unique_ptr> LookupBaseProcessor::buildPlanWithFilter( - nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan, - StorageExpressionContext* exprCtx, - Expression* exp) { - auto indexId = ctx.get_index_id(); - auto colHints = ctx.get_column_hints(); - - auto indexScan = - std::make_unique>(context_.get(), indexId, std::move(colHints)); - - auto filter = std::make_unique>( - context_.get(), indexScan.get(), exprCtx, exp, context_->isEdge(), limit_); - filter->addDependency(indexScan.get()); - auto output = - std::make_unique>(result, context_.get(), filter.get(), true); - output->addDependency(filter.get()); - plan.addNode(std::move(indexScan)); - plan.addNode(std::move(filter)); - return output; -} - -/** - * - * +----------+-----------+ - * + IndexOutputNode + - * +----------+-----------+ - * | - * +----------+-----------+ - * + IndexFilterNode + - * +----------+-----------+ - * | - * +----------------+-----------------+ - * + IndexEdgeNode or IndexVertexNode + - * +----------------+-----------------+ - * | - * +----------+-----------+ - * + IndexScanNode + - * +----------+-----------+ - * - * If a non-indexed column appears in the WHERE clause or YIELD clause, - * and expression filtering is required . 
- * for example : - * tag (c1, c2, c3) - * index on tag (c1, c2) - * hint : lookup index where c1 == 1 and c2 == 1 and c3 > 1 yield c3 - * lookup index where c1 == 1 and c2 == 1 and c3 > 1 - * lookup index where c1 == 1 and c3 == 1 - **/ -template -std::unique_ptr> -LookupBaseProcessor::buildPlanWithDataAndFilter(nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan, - StorageExpressionContext* exprCtx, - Expression* exp) { - auto indexId = ctx.get_index_id(); - auto colHints = ctx.get_column_hints(); - - auto indexScan = - std::make_unique>(context_.get(), indexId, std::move(colHints)); - if (context_->isEdge()) { - auto edge = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->edgeName_); - edge->addDependency(indexScan.get()); - auto filter = std::make_unique>( - context_.get(), edge.get(), exprCtx, exp, limit_); - filter->addDependency(edge.get()); - - auto output = std::make_unique>(result, context_.get(), filter.get()); - output->addDependency(filter.get()); - plan.addNode(std::move(indexScan)); - plan.addNode(std::move(edge)); - plan.addNode(std::move(filter)); - return output; - } else { - auto vertex = std::make_unique>( - context_.get(), indexScan.get(), schemas_, context_->tagName_); - vertex->addDependency(indexScan.get()); - auto filter = std::make_unique>( - context_.get(), vertex.get(), exprCtx, exp, limit_); - filter->addDependency(vertex.get()); - - auto output = std::make_unique>(result, context_.get(), filter.get()); - output->addDependency(filter.get()); - plan.addNode(std::move(indexScan)); - plan.addNode(std::move(vertex)); - plan.addNode(std::move(filter)); - return output; - } -} -template -void LookupBaseProcessor::profilePlan(StoragePlan& plan) { - auto& nodes = plan.getNodes(); - std::lock_guard lck(BaseProcessor::profileMut_); - for (auto& node : nodes) { - BaseProcessor::profileDetail(node->name_, node->duration_.elapsedInUSec()); - } -} - -} // namespace storage -} // namespace nebula 
diff --git a/src/storage/index/LookupBaseProcessor.h b/src/storage/index/LookupBaseProcessor.h deleted file mode 100644 index dce3165b73b..00000000000 --- a/src/storage/index/LookupBaseProcessor.h +++ /dev/null @@ -1,92 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef STORAGE_QUERY_LOOKUPBASEPROCESSOR_H_ -#define STORAGE_QUERY_LOOKUPBASEPROCESSOR_H_ - -#include "common/base/Base.h" -#include "storage/BaseProcessor.h" -#include "storage/exec/AggregateNode.h" -#include "storage/exec/DeDupNode.h" -#include "storage/exec/IndexEdgeNode.h" -#include "storage/exec/IndexFilterNode.h" -#include "storage/exec/IndexOutputNode.h" -#include "storage/exec/IndexScanNode.h" -#include "storage/exec/IndexVertexNode.h" -#include "storage/exec/StoragePlan.h" - -namespace nebula { -namespace storage { -using IndexFilterItem = - std::unordered_map, Expression*>>; - -template -class LookupBaseProcessor : public BaseProcessor { - public: - virtual ~LookupBaseProcessor() = default; - - virtual void process(const REQ& req) = 0; - - protected: - LookupBaseProcessor(StorageEnv* env, - const ProcessorCounters* counters, - folly::Executor* executor = nullptr) - : BaseProcessor(env, counters), executor_(executor) {} - - virtual void onProcessFinished() = 0; - - nebula::cpp2::ErrorCode requestCheck(const cpp2::LookupIndexRequest& req); - - bool isOutsideIndex(Expression* filter, const meta::cpp2::IndexItem* index); - - StatusOr> buildPlan(IndexFilterItem* filterItem, nebula::DataSet* result); - - std::unique_ptr> buildPlanBasic( - nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan, - bool hasNullableCol, - const std::vector& fields); - - std::unique_ptr> buildPlanWithData(nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan); - - std::unique_ptr> buildPlanWithFilter(nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& 
plan, - StorageExpressionContext* exprCtx, - Expression* exp); - - std::unique_ptr> buildPlanWithDataAndFilter( - nebula::DataSet* result, - const cpp2::IndexQueryContext& ctx, - StoragePlan& plan, - StorageExpressionContext* exprCtx, - Expression* exp); - - void profilePlan(StoragePlan& plan); - - protected: - GraphSpaceID spaceId_; - std::unique_ptr planContext_; - std::unique_ptr context_; - folly::Executor* executor_{nullptr}; - nebula::DataSet resultDataSet_; - std::vector partResults_; - std::vector indexContexts_{}; - std::vector yieldCols_{}; - std::vector filterItems_; - // Save schemas when column is out of index, need to read from data - std::vector> schemas_; - std::vector deDupColPos_; - int64_t limit_ = -1; -}; - -} // namespace storage -} // namespace nebula - -#include "storage/index/LookupBaseProcessor-inl.h" -#endif // STORAGE_QUERY_LOOKUPBASEPROCESSOR_H_ diff --git a/src/storage/index/LookupProcessor.cpp b/src/storage/index/LookupProcessor.cpp index 81e0ddc4ece..272bfead534 100644 --- a/src/storage/index/LookupProcessor.cpp +++ b/src/storage/index/LookupProcessor.cpp @@ -1,17 +1,28 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* Copyright (c) 2021 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License. 
*/ - #include "storage/index/LookupProcessor.h" -#include "storage/exec/DeDupNode.h" +#include +#include +#include "folly/Likely.h" +#include "interface/gen-cpp2/common_types.tcc" +#include "interface/gen-cpp2/meta_types.tcc" +#include "interface/gen-cpp2/storage_types.tcc" +#include "storage/exec/IndexDedupNode.h" +#include "storage/exec/IndexEdgeScanNode.h" +#include "storage/exec/IndexLimitNode.h" +#include "storage/exec/IndexNode.h" +#include "storage/exec/IndexProjectionNode.h" +#include "storage/exec/IndexSelectionNode.h" +#include "storage/exec/IndexVertexScanNode.h" namespace nebula { namespace storage { - ProcessorCounters kLookupCounters; - +// print Plan for debug +inline void printPlan(IndexNode* node, int tab = 0); void LookupProcessor::process(const cpp2::LookupIndexRequest& req) { if (executor_ != nullptr) { executor_->add([req, this]() { this->doProcess(req); }); @@ -21,125 +32,257 @@ void LookupProcessor::process(const cpp2::LookupIndexRequest& req) { } void LookupProcessor::doProcess(const cpp2::LookupIndexRequest& req) { - auto retCode = requestCheck(req); - if (limit_ == 0) { - onProcessFinished(); - onFinished(); - return; - } if (req.common_ref().has_value() && req.get_common()->profile_detail_ref().value_or(false)) { profileDetailFlag_ = true; } - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + auto code = prepare(req); + if (UNLIKELY(code != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { for (auto& p : req.get_parts()) { - pushResultCode(retCode, p); + pushResultCode(code, p); } onFinished(); return; } + auto plan = buildPlan(req); - // todo(doodle): specify by each query + if (UNLIKELY(profileDetailFlag_)) { + plan->enableProfileDetail(); + } + InitContext ctx; + code = plan->init(ctx); + if (UNLIKELY(code != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + for (auto& p : req.get_parts()) { + pushResultCode(code, p); + } + onFinished(); + return; + } if (!FLAGS_query_concurrently) { - runInSingleThread(req); + runInSingleThread(req.get_parts(), 
std::move(plan)); } else { - runInMultipleThread(req); + runInMultipleThread(req.get_parts(), std::move(plan)); } } +::nebula::cpp2::ErrorCode LookupProcessor::prepare(const cpp2::LookupIndexRequest& req) { + auto retCode = this->getSpaceVidLen(req.get_space_id()); + if (UNLIKELY(retCode != ::nebula::cpp2::ErrorCode::SUCCEEDED)) { + return retCode; + } + planContext_ = std::make_unique( + this->env_, req.get_space_id(), this->spaceVidLen_, this->isIntId_, req.common_ref()); + planContext_->isEdge_ = + req.get_indices().get_schema_id().getType() == nebula::cpp2::SchemaID::Type::edge_type; + context_ = std::make_unique(this->planContext_.get()); + std::string schemaName; + if (planContext_->isEdge_) { + auto edgeType = req.get_indices().get_schema_id().get_edge_type(); + auto schemaNameValue = env_->schemaMan_->toEdgeName(req.get_space_id(), edgeType); + if (!schemaNameValue.ok()) { + return ::nebula::cpp2::ErrorCode::E_EDGE_NOT_FOUND; + } + schemaName = schemaNameValue.value(); + context_->edgeType_ = edgeType; + } else { + auto tagId = req.get_indices().get_schema_id().get_tag_id(); + auto schemaNameValue = env_->schemaMan_->toTagName(req.get_space_id(), tagId); + if (!schemaNameValue.ok()) { + return ::nebula::cpp2::ErrorCode::E_TAG_NOT_FOUND; + } + schemaName = schemaNameValue.value(); + context_->tagId_ = tagId; + } + std::vector colNames; + for (auto& col : *req.get_return_columns()) { + colNames.emplace_back(schemaName + "." 
+ col); + } + resultDataSet_ = ::nebula::DataSet(colNames); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; +} -void LookupProcessor::runInSingleThread(const cpp2::LookupIndexRequest& req) { - filterItems_.emplace_back(IndexFilterItem()); - auto plan = buildPlan(&filterItems_.front(), &resultDataSet_); - if (!plan.ok()) { - for (auto& p : req.get_parts()) { - pushResultCode(nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND, p); +std::unique_ptr LookupProcessor::buildPlan(const cpp2::LookupIndexRequest& req) { + std::vector> nodes; + for (auto& ctx : req.get_indices().get_contexts()) { + auto node = buildOneContext(ctx); + nodes.emplace_back(std::move(node)); + } + for (size_t i = 0; i < nodes.size(); i++) { + auto projection = + std::make_unique(context_.get(), *req.get_return_columns()); + projection->addChild(std::move(nodes[i])); + nodes[i] = std::move(projection); + } + if (nodes.size() > 1) { + std::vector dedupColumn; + if (context_->isEdge()) { + dedupColumn = std::vector{kSrc, kRank, kDst}; + } else { + dedupColumn = std::vector{kVid}; } - onFinished(); - return; + auto dedup = std::make_unique(context_.get(), dedupColumn); + for (auto& node : nodes) { + dedup->addChild(std::move(node)); + } + nodes.clear(); + nodes[0] = std::move(dedup); } + if (req.limit_ref().has_value()) { + auto limit = *req.get_limit(); + auto node = std::make_unique(context_.get(), limit); + node->addChild(std::move(nodes[0])); + nodes[0] = std::move(node); + } + return std::move(nodes[0]); +} - std::unordered_set failedParts; - for (const auto& partId : req.get_parts()) { - auto ret = plan.value().go(partId); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - if (failedParts.find(partId) == failedParts.end()) { - failedParts.emplace(partId); - handleErrorCode(ret, spaceId_, partId); +std::unique_ptr LookupProcessor::buildOneContext(const cpp2::IndexQueryContext& ctx) { + std::unique_ptr node; + DLOG(INFO) << ctx.get_column_hints().size(); + DLOG(INFO) << &ctx.get_column_hints(); + 
DLOG(INFO) << ::apache::thrift::SimpleJSONSerializer::serialize(ctx); + if (context_->isEdge()) { + node = std::make_unique( + context_.get(), ctx.get_index_id(), ctx.get_column_hints(), context_->env()->kvstore_); + } else { + node = std::make_unique( + context_.get(), ctx.get_index_id(), ctx.get_column_hints(), context_->env()->kvstore_); + } + if (ctx.filter_ref().is_set() && !ctx.get_filter().empty()) { + auto expr = Expression::decode(context_->objPool(), *ctx.filter_ref()); + auto filterNode = std::make_unique(context_.get(), expr); + filterNode->addChild(std::move(node)); + node = std::move(filterNode); + } + return node; +} + +void LookupProcessor::runInSingleThread(const std::vector& parts, + std::unique_ptr plan) { + // printPlan(plan.get()); + std::vector> datasetList; + std::vector<::nebula::cpp2::ErrorCode> codeList; + for (auto part : parts) { + DLOG(INFO) << "execute part:" << part; + plan->execute(part); + ::nebula::cpp2::ErrorCode code = ::nebula::cpp2::ErrorCode::SUCCEEDED; + decltype(datasetList)::value_type dataset; + do { + auto result = plan->next(); + if (!result.success()) { + code = result.code(); + break; + } + if (result.hasData()) { + dataset.emplace_back(std::move(result).row()); + } else { + break; } + } while (true); + datasetList.emplace_back(std::move(dataset)); + codeList.emplace_back(code); + } + for (size_t i = 0; i < datasetList.size(); i++) { + if (codeList[i] == ::nebula::cpp2::ErrorCode::SUCCEEDED) { + while (!datasetList[i].empty()) { + resultDataSet_.emplace_back(std::move(datasetList[i].front())); + datasetList[i].pop_front(); + } + } else { + DLOG(INFO) << int(codeList[i]); + handleErrorCode(codeList[i], context_->spaceId(), parts[i]); } } if (UNLIKELY(profileDetailFlag_)) { - profilePlan(plan.value()); + profilePlan(plan.get()); } onProcessFinished(); onFinished(); } -void LookupProcessor::runInMultipleThread(const cpp2::LookupIndexRequest& req) { - // As for lookup, once requestCheck is done, the info in RunTimeContext 
won't - // be changed anymore. So we only use one RunTimeContext, could make it per - // partition later if necessary. - for (size_t i = 0; i < req.get_parts().size(); i++) { - nebula::DataSet result = resultDataSet_; - partResults_.emplace_back(std::move(result)); - filterItems_.emplace_back(IndexFilterItem()); - } - size_t i = 0; - std::vector>> futures; - for (const auto& partId : req.get_parts()) { - futures.emplace_back(runInExecutor(&filterItems_[i], &partResults_[i], partId)); - i++; +void LookupProcessor::runInMultipleThread(const std::vector& parts, + std::unique_ptr plan) { + std::vector> planCopy = reproducePlan(plan.get(), parts.size()); + using ReturnType = std::tuple>; + std::vector> futures; + for (size_t i = 0; i < parts.size(); i++) { + futures.emplace_back(folly::via( + executor_, [this, plan = std::move(planCopy[i]), part = parts[i]]() -> ReturnType { + ::nebula::cpp2::ErrorCode code = ::nebula::cpp2::ErrorCode::SUCCEEDED; + std::deque dataset; + plan->execute(part); + do { + auto result = plan->next(); + if (!result.success()) { + code = result.code(); + break; + } + if (result.hasData()) { + dataset.emplace_back(std::move(result).row()); + } else { + break; + } + } while (true); + if (UNLIKELY(profileDetailFlag_)) { + profilePlan(plan.get()); + } + return {part, code, dataset}; + })); } - - folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable { + folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) { CHECK(!t.hasException()); const auto& tries = t.value(); for (size_t j = 0; j < tries.size(); j++) { CHECK(!tries[j].hasException()); - const auto& [code, partId] = tries[j].value(); - if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(code, spaceId_, partId); + auto& [partId, code, dataset] = tries[j].value(); + if (code == ::nebula::cpp2::ErrorCode::SUCCEEDED) { + for (auto& row : dataset) { + resultDataSet_.emplace_back(std::move(row)); + } } else { - 
resultDataSet_.append(std::move(partResults_[j])); + handleErrorCode(code, context_->spaceId(), partId); } } - // when run each part concurrently, we need to dedup again. - if (!deDupColPos_.empty()) { - DeDupNode::dedup(resultDataSet_.rows, deDupColPos_); - } + DLOG(INFO) << "finish"; this->onProcessFinished(); this->onFinished(); }); } - -folly::Future> LookupProcessor::runInExecutor( - IndexFilterItem* filterItem, nebula::DataSet* result, PartitionID partId) { - return folly::via(executor_, [this, filterItem, result, partId]() { - auto plan = buildPlan(filterItem, result); - if (!plan.ok()) { - return std::make_pair(nebula::cpp2::ErrorCode::E_INDEX_NOT_FOUND, partId); +std::vector> LookupProcessor::reproducePlan(IndexNode* root, + size_t count) { + std::vector> ret(count); + for (size_t i = 0; i < count; i++) { + ret[i] = root->copy(); + DLOG(INFO) << ret[i].get(); + } + for (auto& child : root->children()) { + auto childPerPlan = reproducePlan(child.get(), count); + for (size_t i = 0; i < count; i++) { + ret[i]->addChild(std::move(childPerPlan[i])); } - auto ret = plan.value().go(partId); - if (UNLIKELY(this->profileDetailFlag_)) { - profilePlan(plan.value()); + } + return ret; +} +void LookupProcessor::profilePlan(IndexNode* root) { + std::queue q; + q.push(root); + while (!q.empty()) { + auto node = q.front(); + q.pop(); + auto id = node->identify(); + auto iter = profileDetail_.find(id); + if (iter == profileDetail_.end()) { + profileDetail_[id] = node->duration().elapsedInUSec(); + } else { + iter->second += node->duration().elapsedInUSec(); } - return std::make_pair(ret, partId); - }); + for (auto& child : node->children()) { + q.push(child.get()); + } + } } - -void LookupProcessor::onProcessFinished() { - if (context_->isEdge()) { - std::transform(resultDataSet_.colNames.begin(), - resultDataSet_.colNames.end(), - resultDataSet_.colNames.begin(), - [this](const auto& col) { return context_->edgeName_ + "." 
+ col; }); - } else { - std::transform(resultDataSet_.colNames.begin(), - resultDataSet_.colNames.end(), - resultDataSet_.colNames.begin(), - [this](const auto& col) { return context_->tagName_ + "." + col; }); +inline void printPlan(IndexNode* node, int tab) { + for (auto& child : node->children()) { + printPlan(child.get(), tab + 1); } - resp_.set_data(std::move(resultDataSet_)); } - } // namespace storage } // namespace nebula diff --git a/src/storage/index/LookupProcessor.h b/src/storage/index/LookupProcessor.h index cd0b6fad795..1751f8f1c27 100644 --- a/src/storage/index/LookupProcessor.h +++ b/src/storage/index/LookupProcessor.h @@ -1,47 +1,45 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. +/* Copyright (c) 2021 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License. */ - -#ifndef STORAGE_QUERY_LOOKUP_H_ -#define STORAGE_QUERY_LOOKUP_H_ - +#pragma once #include "common/base/Base.h" -#include "storage/index/LookupBaseProcessor.h" - +#include "common/base/ErrorOr.h" +#include "interface/gen-cpp2/storage_types.h" +#include "storage/BaseProcessor.h" +#include "storage/exec/IndexNode.h" namespace nebula { namespace storage { - extern ProcessorCounters kLookupCounters; -class LookupProcessor - : public LookupBaseProcessor { +class LookupProcessor : public BaseProcessor { public: static LookupProcessor* instance(StorageEnv* env, const ProcessorCounters* counters = &kLookupCounters, folly::Executor* executor = nullptr) { return new LookupProcessor(env, counters, executor); } - - void process(const cpp2::LookupIndexRequest& req) override; - - protected: - LookupProcessor(StorageEnv* env, const ProcessorCounters* counters, folly::Executor* executor) - : LookupBaseProcessor( - env, counters, executor) {} - - void onProcessFinished() override; + void process(const cpp2::LookupIndexRequest& req); private: - void runInSingleThread(const cpp2::LookupIndexRequest& req); - void runInMultipleThread(const cpp2::LookupIndexRequest& 
req); - - folly::Future> runInExecutor( - IndexFilterItem* filterItem, nebula::DataSet* result, PartitionID partId); - + LookupProcessor(StorageEnv* env, const ProcessorCounters* counters, folly::Executor* executor) + : BaseProcessor(env, counters), executor_(executor) {} void doProcess(const cpp2::LookupIndexRequest& req); + void onProcessFinished() { + BaseProcessor::resp_.set_data(std::move(resultDataSet_)); + } + void profilePlan(IndexNode* plan); + void runInSingleThread(const std::vector& parts, std::unique_ptr plan); + void runInMultipleThread(const std::vector& parts, std::unique_ptr plan); + ::nebula::cpp2::ErrorCode prepare(const cpp2::LookupIndexRequest& req); + std::unique_ptr buildPlan(const cpp2::LookupIndexRequest& req); + std::unique_ptr buildOneContext(const cpp2::IndexQueryContext& ctx); + std::vector> reproducePlan(IndexNode* root, size_t count); + folly::Executor* executor_{nullptr}; + std::unique_ptr planContext_; + std::unique_ptr context_; + nebula::DataSet resultDataSet_; + std::vector partResults_; }; - } // namespace storage } // namespace nebula -#endif // STORAGE_QUERY_LOOKUP_H_ diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index d976ab2b209..c664696de53 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -708,6 +708,20 @@ nebula_add_test( gtest ) +nebula_add_test( + NAME + index_test + SOURCES + IndexTest.cpp + OBJECTS + ${storage_test_deps} + LIBRARIES + ${ROCKSDB_LIBRARIES} + ${THRIFT_LIBRARIES} + ${PROXYGEN_LIBRARIES} + gtest +) + nebula_add_executable( NAME chain_update_edge_test diff --git a/src/storage/test/IndexScanTest.cpp b/src/storage/test/IndexScanTest.cpp index 3e17741930f..59d5df1ef6f 100644 --- a/src/storage/test/IndexScanTest.cpp +++ b/src/storage/test/IndexScanTest.cpp @@ -76,7 +76,8 @@ static std::string genEdgeIndexKey(meta::SchemaManager* schemaMan, VertexID dst) { auto reader = RowReaderWrapper::getEdgePropReader(schemaMan, prop, spaceId, type); auto 
values = collectIndexValues(reader.get(), index->get_fields()); - auto indexKey = NebulaKeyUtils::edgeIndexKey(partId, index->get_index_id(), src, 0, dst, values); + auto indexKey = + NebulaKeyUtils::edgeIndexKeys(partId, index->get_index_id(), src, 0, dst, values)[0]; return indexKey; } diff --git a/src/storage/test/IndexTest.cpp b/src/storage/test/IndexTest.cpp new file mode 100644 index 00000000000..6c4e34ad122 --- /dev/null +++ b/src/storage/test/IndexTest.cpp @@ -0,0 +1,1937 @@ +/* Copyright (c) 2018 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include + +#include +#include + +#include "codec/RowReaderWrapper.h" +#include "codec/RowWriterV2.h" +#include "common/base/ObjectPool.h" +#include "common/expression/ConstantExpression.h" +#include "common/expression/PropertyExpression.h" +#include "common/expression/RelationalExpression.h" +#include "common/utils/NebulaKeyUtils.h" +#include "kvstore/KVEngine.h" +#include "kvstore/KVIterator.h" +#include "storage/exec/IndexDedupNode.h" +#include "storage/exec/IndexEdgeScanNode.h" +#include "storage/exec/IndexLimitNode.h" +#include "storage/exec/IndexNode.h" +#include "storage/exec/IndexProjectionNode.h" +#include "storage/exec/IndexSelectionNode.h" +#include "storage/exec/IndexVertexScanNode.h" +#include "storage/test/IndexTestUtil.h" +namespace nebula { +namespace storage { +namespace { +int schemaVer = 2; +using std::string_literals::operator""s; +} // namespace +/** + * IndexScanTest + * + * Test: + * 1. Vertex/Edge + * 2. back to table or not + * 3. different value type + * a. int/float/bool/fix_string/time/date/datetime + * b. compound index + * 4. range/prefix + * a. prefix(equal) + * b. range with begin is include/exclude/-INF + * c. range with end id include/exclude/+INF + * 5. nullable + * 6. 
multiPart + * Case: + * ┌────────────┬───────────┬───────────────┬─────────────────────────┬─────────┐ + * │ section1 │ name │ case │ description │ NOTICE │ + * ├────────────┼───────────┼───────────────┼─────────────────────────┼─────────┤ + * | Base | Base | | | | + * | ├───────────┼───────────────┼─────────────────────────┼─────────┤ + * | | Vertex | IndexOnly | | | + * | | Edge | BackToTable | | | + * | ├───────────┼───────────────┼─────────────────────────┼─────────┤ + * | | MultiPart | | | | + * ├────────────┼───────────┼───────────────┼─────────────────────────┼─────────┤ + * | Value Type | Int | Truncate | Test different interval | | + * | | Float | NoTruncate | with each type of Value | | + * | | Bool | INCLUDE_BEGIN | | | + * | | String | INCLUDE_END | | | + * | | Time | EXCLUDE_BEGIN | | | + * | | Date | EXCLUDE_END | | | + * | | DateTime | POSITIVE_INF | | | + * | | Compound | NEGATIVE_INF | | | + * | | Nullable | | | | + * | | Geography | | | | + * └────────────┴───────────┴───────────────┴─────────────────────────┴─────────┘ + * + * ┌─┬┐ + * │ ││ + * ├─┼┤ + * └─┴┘ + */ +class IndexScanTestHelper { + public: + void setIndex(IndexVertexScanNode* node, std::shared_ptr<::nebula::meta::cpp2::IndexItem> index) { + node->getIndex = [index](std::shared_ptr<::nebula::meta::cpp2::IndexItem>& ret) { + ret = index; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + } + void setIndex(IndexEdgeScanNode* node, std::shared_ptr<::nebula::meta::cpp2::IndexItem> index) { + node->getIndex = [index](std::shared_ptr<::nebula::meta::cpp2::IndexItem>& ret) { + ret = index; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + } + void setTag(IndexVertexScanNode* node, + std::shared_ptr<::nebula::meta::NebulaSchemaProvider> schema) { + node->getTag = [schema](IndexVertexScanNode::TagSchemas& tag) { + tag = std::vector>{schema}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + } + void setEdge(IndexEdgeScanNode* node, + 
std::shared_ptr<::nebula::meta::NebulaSchemaProvider> schema) { + node->getEdge = [schema](IndexEdgeScanNode::EdgeSchemas& edge) { + edge = std::vector>{schema}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + } + void setFatal(IndexScanNode* node, bool value) { node->fatalOnBaseNotFound_ = value; } +}; +class IndexScanTest : public ::testing::Test { + protected: + using Schema = ::nebula::meta::NebulaSchemaProvider; + using IndexItem = ::nebula::meta::cpp2::IndexItem; + using ColumnHint = ::nebula::storage::cpp2::IndexColumnHint; + static ColumnHint makeColumnHint(const std::string& name, const Value& value) { + ColumnHint hint; + hint.set_column_name(name); + hint.set_begin_value(value); + hint.set_scan_type(cpp2::ScanType::PREFIX); + return hint; + } + template + static ColumnHint makeColumnHint(const std::string& name, const Value& begin, const Value& end) { + ColumnHint hint; + hint.set_column_name(name); + hint.set_scan_type(cpp2::ScanType::RANGE); + hint.set_begin_value(begin); + hint.set_end_value(end); + hint.set_include_begin(includeBegin); + hint.set_include_end(includeEnd); + return hint; + } + template + static ColumnHint makeBeginColumnHint(const std::string& name, const Value& begin) { + ColumnHint hint; + hint.set_column_name(name); + hint.set_scan_type(cpp2::ScanType::RANGE); + hint.set_begin_value(begin); + hint.set_include_begin(include); + return hint; + } + template + static ColumnHint makeEndColumnHint(const std::string& name, const Value& end) { + ColumnHint hint; + hint.set_column_name(name); + hint.set_scan_type(cpp2::ScanType::RANGE); + hint.set_end_value(end); + hint.set_include_end(include); + return hint; + } + static std::vector> encodeTag( + const std::vector& rows, + TagID tagId, + std::shared_ptr schema, + std::vector> indices) { + std::vector> ret(indices.size() + 1); + for (size_t i = 0; i < rows.size(); i++) { + auto key = NebulaKeyUtils::vertexKey(8, 0, std::to_string(i), tagId); + RowWriterV2 writer(schema.get()); + for 
(size_t j = 0; j < rows[i].size(); j++) { + writer.setValue(j, rows[i][j]); + } + writer.finish(); + auto value = writer.moveEncodedStr(); + CHECK(ret[0].insert({key, value}).second); + RowReaderWrapper reader(schema.get(), folly::StringPiece(value), schemaVer); + for (size_t j = 0; j < indices.size(); j++) { + auto& index = indices[j]; + auto indexValue = IndexKeyUtils::collectIndexValues(&reader, index->get_fields()).value(); + auto indexKey = IndexKeyUtils::vertexIndexKeys( + 8, 0, index->get_index_id(), std::to_string(i), std::move(indexValue))[0]; + CHECK(ret[j + 1].insert({indexKey, ""}).second); + } + } + return ret; + } + static std::vector> encodeEdge( + const std::vector& rows, + EdgeType edgeType, + std::shared_ptr schema, + std::vector> indices) { + std::vector> ret(indices.size() + 1); + for (size_t i = 0; i < rows.size(); i++) { + auto key = NebulaKeyUtils::edgeKey(8, 0, std::to_string(i), edgeType, i, std::to_string(i)); + RowWriterV2 writer(schema.get()); + for (size_t j = 0; j < rows[i].size(); j++) { + writer.setValue(j, rows[i][j]); + } + writer.finish(); + auto value = writer.moveEncodedStr(); + CHECK(ret[0].insert({key, value}).second); + RowReaderWrapper reader(schema.get(), folly::StringPiece(value), schemaVer); + for (size_t j = 0; j < indices.size(); j++) { + auto& index = indices[j]; + auto indexValue = IndexKeyUtils::collectIndexValues(&reader, index->get_fields()).value(); + auto indexKey = IndexKeyUtils::edgeIndexKeys(8, + 0, + index->get_index_id(), + std::to_string(i), + i, + std::to_string(i), + std::move(indexValue))[0]; + CHECK(ret[j + 1].insert({indexKey, ""}).second); + } + } + return ret; + } + static PlanContext* getPlanContext() { + static std::unique_ptr ctx = std::make_unique(nullptr, 0, 8, false); + return ctx.get(); + } + static std::unique_ptr makeContext(TagID tagId, EdgeType edgeType) { + auto ctx = std::make_unique(getPlanContext()); + ctx->tagId_ = tagId; + ctx->edgeType_ = edgeType; + return ctx; + } +}; 
+TEST_F(IndexScanTest, Base) { + auto rows = R"( + int | int + 1 | 2 + 1 | 3 + )"_row; + auto schema = R"( + a | int ||false + b | int ||false + )"_schema; + auto indices = R"( + TAG(t,1) + (i1,2):a + (i2,3):b + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + { // Case 1 + std::vector columnHints{ + makeColumnHint("a", Value(1)) // a=1 + }; + IndexID indexId = 2; + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[0]); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid, "a"}; + scanNode->init(initCtx); + scanNode->execute(0); + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int + 0 | 1 + 1 | 1 + )"_row; + std::vector colOrder = {kVid, "a"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + for (size_t j = 0; j < expect[i].size(); j++) { + ASSERT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } // End of Case 1 + { // Case 2 + std::vector columnHints{ + makeColumnHint("b", Value(3)) // b=3 + }; + IndexID indexId = 3; + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[1]); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid, "b"}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + 
ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int + 1 | 3 + )"_row; + std::vector colOrder = {kVid, "b"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + for (size_t j = 0; j < expect[i].size(); j++) { + ASSERT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } // End of Case 2 +} +TEST_F(IndexScanTest, Vertex) { + auto rows = R"( + int | int + 1 | 2 + 1 | 3 + )"_row; + auto schema = R"( + a | int | | false + b | int | | false + )"_schema; + auto indices = R"( + TAG(t,1) + (i1,2):a + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + std::vector columnHints{ + makeColumnHint("a", Value(1)) // a=1 + }; + IndexID indexId = 0; + auto context = makeContext(1, 0); + { // Case 1: IndexOnly + // Only put index key-values into kvstore + for (auto& item : kv[1]) { + kvstore->put(item.first, item.second); + } + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[0]); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid, "a"}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int + 0 | 1 + 1 | 1 + )"_row; + std::vector colOrder = {kVid, "a"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + for (size_t j = 0; j < expect[i].size(); j++) { + ASSERT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } // End of Case 1 + { // Case 2: Access base data 
+ // Put base data key-values into kvstore + for (auto& item : kv[0]) { + kvstore->put(item.first, item.second); + } + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[0]); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid, "b"}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int + 0 | 2 + 1 | 3 + )"_row; + std::vector colOrder = {kVid, "b"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + VLOG(1) << result[i]; + for (size_t j = 0; j < expect[i].size(); j++) { + ASSERT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } // End of Case 2 +} +TEST_F(IndexScanTest, Edge) { + auto rows = R"( + int | int | int + 5 | 2 | 1 + 10 | 3 | 2 + 20 | 3 | 3 + )"_row; + auto schema = R"( + a | int | | false + b | int | | false + c | int | | false + )"_schema; + auto indices = R"( + EDGE(e,1) + (i1,2):b,c + )"_index(schema); + auto kv = encodeEdge(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + std::vector columnHints{ + makeColumnHint("b", Value(3)), // b=3 + }; + IndexID indexId = 0; + auto context = makeContext(0, 1); + { // Case 1: IndexOnly + for (auto& item : kv[1]) { + kvstore->put(item.first, item.second); + } + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[0]); + helper.setEdge(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kSrc, kRank, kDst, "c"}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while 
(true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int | string | int + 1 | 1 | 1 | 2 + 2 | 2 | 2 | 3 + )"_row; + std::vector colOrder = {kSrc, kRank, kDst, "c"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + for (size_t j = 0; j < expect[i].size(); j++) { + EXPECT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } // End of Case 1 + { // Case 2: Access base data + for (auto& item : kv[0]) { + kvstore->put(item.first, item.second); + } + auto scanNode = + std::make_unique(context.get(), indexId, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), indices[0]); + helper.setEdge(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kSrc, kRank, kDst, "a"}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + auto expect = R"( + string | int | string | int + 1 | 1 | 1 | 10 + 2 | 2 | 2 | 20 + )"_row; + std::vector colOrder = {kSrc, kRank, kDst, "a"}; + ASSERT_EQ(result.size(), expect.size()); + for (size_t i = 0; i < result.size(); i++) { + ASSERT_EQ(result[i].size(), expect[i].size()); + for (size_t j = 0; j < expect[i].size(); j++) { + EXPECT_EQ(expect[i][j], result[i][initCtx.retColMap[colOrder[j]]]); + } + } + } +} +TEST_F(IndexScanTest, Int) { + auto rows = R"( + int | int | int + 1 | -1 | -10 + 2 | 1 | -9223372036854775808 + 3 | 0 | -1 + 4 | 9223372036854775807 | 0 + 5 | -9223372036854775808 | 9223372036854775807 + 6 | | 0 + )"_row; + auto schema = R"( + a | int | | false + b | int | | true + c | int | | false + )"_schema; + auto indices = R"( + TAG(t,1) + (i1,2):a + (i2,3):b + 
(i3,4):c + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [](auto... vidList) { + std::vector ret; + std::vector value; + (value.push_back(std::to_string(vidList)), ...); + for (auto& v : value) { + Row row; + row.emplace_back(v); + ret.emplace_back(std::move(row)); + } + return ret; + }; + const int64_t MAX = 0x7fffffffffffffff; + const int64_t MIN = -MAX - 1; + /* Case 1: Prefix */ + { + std::vector columnHints = {makeColumnHint("a", 1)}; // a=1; + check(indices[0], columnHints, expect(0), "case1.1"); // + columnHints = {makeColumnHint("b", MAX)}; // b=MAX + check(indices[1], columnHints, expect(3), "case1.2"); // + columnHints = {makeColumnHint("b", MIN)}; // b=MIN + check(indices[1], columnHints, expect(4), "case1.3"); // + columnHints = {makeColumnHint("c", 0)}; // c=0 + check(indices[2], columnHints, expect(3, 5), "case1.4"); // + } // End of Case 1 + /* Case 2: [x, INF) */ + { + std::vector columnHints = {makeBeginColumnHint("a", -1)}; // Case2.1: a >= -1 + check(indices[0], columnHints, expect(0, 1, 2, 3, 4, 5), "case2.1"); // + columnHints = {makeBeginColumnHint("a", 4)}; // 
Case2.2: a>=4 + check(indices[0], columnHints, expect(3, 4, 5), "case2.2"); // + columnHints = {makeBeginColumnHint("a", 7)}; // Case2.3: a>=7 + check(indices[0], columnHints, {}, "case2.3"); // + columnHints = {makeBeginColumnHint("b", MIN)}; // Case2.4: b>=INT_MIN + check(indices[1], columnHints, expect(4, 0, 2, 1, 3), "case2.4"); // + columnHints = {makeBeginColumnHint("b", MAX)}; // Case2.5: b>=INT_MAX + check(indices[1], columnHints, expect(3), "case2.5"); // + columnHints = {makeBeginColumnHint("b", 0)}; // Case2.6: b>=0 + check(indices[1], columnHints, expect(2, 1, 3), "case2.6"); // + columnHints = {makeBeginColumnHint("c", MIN)}; // Case2.7: c>=INT_MIN + check(indices[2], columnHints, expect(1, 0, 2, 3, 5, 4), "case2.7"); // + columnHints = {makeBeginColumnHint("c", MAX)}; // Case2.8: c>=INT_MAX + check(indices[2], columnHints, expect(4), "case2.8"); // + columnHints = {makeBeginColumnHint("c", 0)}; // Case2.9: c>=0 + check(indices[2], columnHints, expect(3, 5, 4), "case2.9"); // + } // End of Case 2 + /* Case 3: [x, y) */ + { + std::vector columnHints; // + columnHints = {makeColumnHint("a", -1, 10)}; // Case3.1: -1<=a<10 + check(indices[0], columnHints, expect(0, 1, 2, 3, 4, 5), "case3.1"); // + columnHints = {makeColumnHint("a", -100, 4)}; // Case3.2: -100<=a<4 + check(indices[0], columnHints, expect(0, 1, 2), "case3.2"); // + columnHints = {makeColumnHint("a", 4, 100)}; // Case3.3: 4<=a<100 + check(indices[0], columnHints, expect(3, 4, 5), "case3.3"); // + columnHints = {makeColumnHint("a", 2, 5)}; // Case3.4: 2<=a<5 + check(indices[0], columnHints, expect(1, 2, 3), "case3.4"); // + columnHints = {makeColumnHint("a", -100, 0)}; // Case3.5: -100<=a<0 + check(indices[0], columnHints, {}, "case3.5"); // + columnHints = {makeColumnHint("a", 10, 100)}; // Case3.6: 10<=a<100 + check(indices[0], columnHints, {}, "case3.6"); // + columnHints = {makeColumnHint("b", MIN, MAX)}; // Case3.7: MIN<=b("c", MIN, MAX)}; // Case3.8: MIN<=c columnHints; // + columnHints 
= {makeBeginColumnHint("a", 3)}; // Case 4.1: a>3 + check(indices[0], columnHints, expect(3, 4, 5), "case4.1"); // + columnHints = {makeBeginColumnHint("b", MIN)}; // Case 4.2: b>MIN + check(indices[1], columnHints, expect(0, 2, 1, 3), "case4.2"); // + columnHints = {makeBeginColumnHint("b", MAX)}; // Case4.3: b>MAX + check(indices[1], columnHints, {}, "case4.3"); // + columnHints = {makeBeginColumnHint("c", MIN)}; // Case4.4: c>MIN + check(indices[2], columnHints, expect(0, 2, 3, 5, 4), "case4.4"); // + columnHints = {makeBeginColumnHint("c", MAX - 1)}; // Case4.5: c>MAX-1 + check(indices[2], columnHints, expect(4), "case4.4"); // + } // End of Case 4 + /* Case 5: (x, y] */ + { + std::vector columnHints; // + columnHints = {makeColumnHint("a", 1, 6)}; // Case5.1: 1("a", 0, 3)}; // Case5.2: 0("b", MIN, MIN)}; // Case5.3: MIN("b", MAX, MAX)}; // Case5.4: MAX("b", 0, MAX)}; // Case5.5: 0("c", -1, MAX)}; // Case5.6: -1 columnHints; // + columnHints = {makeEndColumnHint("a", 4)}; // Case6.1: a<=4 + check(indices[0], columnHints, expect(0, 1, 2, 3), "case6.1"); // + columnHints = {makeEndColumnHint("a", 1)}; // Case6.2: a<=1 + check(indices[0], columnHints, expect(0), "case6.2"); // + columnHints = {makeEndColumnHint("b", MIN)}; // Case6.3: b<=MIN + check(indices[1], columnHints, expect(4), "case6.3"); // + columnHints = {makeEndColumnHint("b", MAX)}; // Case6.4: b<=MAX + check(indices[1], columnHints, expect(4, 0, 2, 1, 3), "casae6.4"); // + columnHints = {makeEndColumnHint("c", MIN)}; // Case6.5: c<=MIN + check(indices[2], columnHints, expect(1), "case6.5"); // + columnHints = {makeEndColumnHint("c", MAX)}; // Case6.6: c<=MAX + check(indices[2], columnHints, expect(1, 0, 2, 3, 5, 4), "case6.6"); // + } // End of Case 6 + /* Case 7: (-INF, y) */ + { + std::vector columnHints; // + columnHints = {makeEndColumnHint("a", 4)}; // Case7.1: a<4 + check(indices[0], columnHints, expect(0, 1, 2), "case7.1"); // + columnHints = {makeEndColumnHint("a", 1)}; // Case7.2: a<1 + 
check(indices[0], columnHints, {}, "case7.2"); // + columnHints = {makeEndColumnHint("b", MIN)}; // Case7.3: b("b", MAX)}; // Case7.4: b("c", MIN)}; // Case7.5: c("c", MAX)}; // Case7.6: c::infinity(); + auto rows = R"( +float | float | float | int +-100.0 | 0.0 | | 0 +-20.0 | -0.0 | | 1 +-5.0 | | 1.7976931348623157e+308 | 2 +-0.0 | <-INF> | 1.7976931348623157e+308 | 3 +0.0 | | | 4 +1.234e10 | <-NaN> | | 5 +5.0 | 4.9406564584124654e-324 | <-INF> | 6 +20.0 | -4.9406564584124654e-324 | <-INF> | 7 +100.0 | 2.2250738585072009e-308 | | 8 +1.2345e10 | -2.2250738585072009e-308 | | 9 +-7e-10 | 2.2250738585072014e-308 | <-NaN> | 10 +7e10 | -2.2250738585072014e-308 | <-NaN> | 11 +-7e10 | 1.7976931348623157e+308 | -0.0 | 12 +7e-10 | -1.7976931348623157e+308 | 0.0 | 13 + )"_row; + auto schema = R"( + a | double | | false + b | double | | false + c | double | | true + )"_schema; + auto indices = R"( + EDGE(e,1) + (i1,2):a + (i2,3):b + (i3,4):c + )"_index(schema); + auto kv = encodeEdge(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(0, 1); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setEdge(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kSrc}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [](auto... 
vidList) { + std::vector ret; + std::vector value; + (value.push_back(std::to_string(vidList)), ...); + for (auto& v : value) { + Row row; + row.emplace_back(v); + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: prefix */ { + auto hint = [](const char* name, double value) { + return std::vector{makeColumnHint(name, value)}; + }; + check(indices[0], hint("a", 1000.0), {}, "case1.1"); // Case1.1: a=1000.0 + check(indices[0], hint("a", 0.0), expect(3, 4), "case1.2"); // Case1.2: a=0.0 + check(indices[1], hint("b", MAX_NV), expect(12), "case1.3"); // Case1.3: b=MAX_NV + check(indices[1], hint("b", MIN_NV), expect(10), "case1.4"); // Case1.4: b=MIN_NV + check(indices[1], hint("b", MAX_SV), expect(8), "case1.5"); // Case1.5: b=MAX_SV + check(indices[1], hint("b", MIN_SV), expect(6), "case1.6"); // Case1.6: b=MIN_SV + check(indices[1], hint("b", -MAX_NV), expect(13), "case1.7"); // Case1.7: b=-MAX_NV + check(indices[1], hint("b", -MIN_NV), expect(11), "case1.8"); // Case1.8: b=-MIN_NV + check(indices[1], hint("b", -MAX_SV), expect(9), "case1.9"); // Case1.9: b=-MAX_SV + check(indices[1], hint("b", -MIN_SV), expect(7), "case1.10"); // Case1.10 b=-MIN_SV + check(indices[1], hint("b", 0.0), expect(0, 1), "case1.11"); // Case1.11: b=0.0 + check(indices[1], hint("b", -0.0), expect(0, 1), "case1.12"); // Case1.12: b=-0.0 + check(indices[1], hint("b", INF), expect(2), "case1.13"); // Case1.13: b= + check(indices[1], hint("b", -INF), expect(3), "case1.14"); // Case1.14: b=<-INF> + check(indices[2], hint("c", INF), expect(4, 5), "case1.15"); // Case1.15: c= + } // End of Case 1 + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 + auto aOrder = expect(12, 0, 1, 2, 10, 3, 4, 13, 6, 7, 8, 5, 9, 11); + auto bOrder = expect(3, 13, 11, 9, 7, 0, 1, 6, 8, 10, 12, 2); + auto cOrder = expect(6, 7, 12, 13, 2, 3, 4, 5); + /* Case 2: [x, INF) */ { + auto hint = [](const char* name, double value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + auto slice = 
[](decltype(aOrder) all, size_t start) { + return decltype(all){all.begin() + start, all.end()}; + }; + check(indices[0], hint("a", -100.0), slice(aOrder, 1), "case2.1"); // Case 2.1: a>=-100 + check(indices[0], hint("a", 0.0), slice(aOrder, 5), "case2.2"); // Case 2.2: a>=0.0 + // Case 2.3~2.14: a>={each of $val} + std::vector val{ + -INF, -MAX_NV, -MIN_NV, -MAX_SV, -MIN_SV, -0.0, 0.0, MIN_SV, MAX_SV, MIN_NV, MAX_NV, INF}; + for (size_t i = 0; i < val.size(); i++) { + std::string case_ = fmt::format("case2.{}", i + 3); + auto offset = i; + if (val[i] == 0 && val[i - 1] == 0) { + offset--; + } + check(indices[1], hint("b", val[i]), slice(bOrder, offset), case_); + } + check(indices[2], hint("c", -INF), slice(cOrder, 0), "case2.15"); + check(indices[2], hint("c", 0.0), slice(cOrder, 2), "case2.16"); + check(indices[2], hint("c", MAX_NV), slice(cOrder, 4), "case2.17"); + check(indices[2], hint("c", INF), slice(cOrder, 6), "case2.18"); + } + /* Case 3: [x, y)*/ { + auto hint = [](const char* name, double left, double right) { + return std::vector{makeColumnHint(name, left, right)}; + }; + auto slice = [](decltype(aOrder) all, size_t start, size_t end) { + return decltype(all){all.begin() + start, all.begin() + std::min(end, all.size())}; + }; + check( + indices[0], hint("a", -100.0, -0.0), slice(aOrder, 1, 5), "case3.1"); // Case3.1:-100<=a<0 + check(indices[0], hint("a", 10, 1e9), slice(aOrder, 9, 11), "case3.2"); + check(indices[0], hint("a", 1, 2), {}, "case3.3"); + check(indices[0], hint("a", -INF, INF), aOrder, "case3.4"); + check(indices[0], hint("a", -INF, 0), slice(aOrder, 0, 5), "case3.5"); + check(indices[0], hint("a", 0, INF), slice(aOrder, 5, 14), "case3.6"); + // Case 3.7~3.18: b<{each of $val} + std::vector val{ + -INF, -MAX_NV, -MIN_NV, -MAX_SV, -MIN_SV, -0.0, 0.0, MIN_SV, MAX_SV, MIN_NV, MAX_NV, INF}; + for (size_t i = 0; i < val.size(); i++) { + std::string case_ = fmt::format("case3.{}", i + 7); + auto offset = i; + if (val[i] == 0 && val[i - 1] == 
0) { + offset--; + } + check(indices[1], hint("b", -INF, val[i]), slice(bOrder, 0, offset), case_); + } + check(indices[2], hint("c", -INF, INF), slice(cOrder, 0, 6), "case3.19"); + } + /* Case 4: (x, INF)*/ { + auto hint = [](const char* name, double value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + auto slice = [](decltype(aOrder) all, size_t start) { + return decltype(all){all.begin() + start, all.end()}; + }; + check(indices[0], hint("a", 100), slice(aOrder, 11), "case4.1"); + check(indices[1], hint("b", INF), {}, "case4.2"); + int64_t x; + ::memcpy(&x, &INF, 8); + // int64_t x = *reinterpret_cast(&INF); + x--; + double y; + ::memcpy(&y, &x, 8); + // double y = *reinterpret_cast(&x); + check(indices[1], hint("b", y), slice(bOrder, 11), "case4.3"); + check(indices[2], hint("c", INF), {}, "case4.4"); + check(indices[2], hint("c", y), slice(cOrder, 6), "case4.5"); + } /* Case 5: (x, y]*/ + { + auto hint = [](const char* name, double left, double right) { + return std::vector{makeColumnHint(name, left, right)}; + }; + auto slice = [](decltype(aOrder) all, size_t start, size_t end) { + return decltype(all){all.begin() + start, all.begin() + end}; + }; + check( + indices[0], hint("a", -100.0, -0.0), slice(aOrder, 2, 7), "case5.1"); // Case3.1:-100<=a<0 + check(indices[0], hint("a", 10, 1e9), slice(aOrder, 9, 11), "case5.2"); + check(indices[0], hint("a", 1, 2), {}, "case5.3"); + check(indices[0], hint("a", -INF, INF), aOrder, "case5.4"); + check(indices[0], hint("a", -INF, 0), slice(aOrder, 0, 7), "case5.5"); + check(indices[0], hint("a", 0, INF), slice(aOrder, 7, 14), "case5.6"); + // Case 5.7~5.18: b>{each of $val} + std::vector val{ + -INF, -MAX_NV, -MIN_NV, -MAX_SV, -MIN_SV, -0.0, 0.0, MIN_SV, MAX_SV, MIN_NV, MAX_NV, INF}; + for (size_t i = 0; i < val.size(); i++) { + std::string case_ = fmt::format("case5.{}", i + 7); + auto offset = i + 1; + if (val[i] == 0 && val[i + 1] == 0) { + offset++; + } + check(indices[1], hint("b", val[i], INF), 
slice(bOrder, offset, bOrder.size()), case_); + } + check(indices[2], hint("c", -INF, INF), slice(cOrder, 2, 8), "case5.19"); + } /* Case 6: (-INF, y]*/ + { + auto hint = [](const char* name, double value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + auto slice = [](decltype(aOrder) all, size_t end) { + return decltype(all){all.begin(), all.begin() + end}; + }; + check(indices[0], hint("a", 0), slice(aOrder, 7), "case6.1"); + check(indices[0], hint("a", -0.0), slice(aOrder, 7), "case6.2"); + check(indices[0], hint("a", -100.0), slice(aOrder, 2), "case6.3"); + // Case 6.4~6.15 + std::vector val{ + -INF, -MAX_NV, -MIN_NV, -MAX_SV, -MIN_SV, -0.0, 0.0, MIN_SV, MAX_SV, MIN_NV, MAX_NV, INF}; + for (size_t i = 0; i < val.size(); i++) { + std::string case_ = fmt::format("case6.{}", i + 3); + auto offset = i + 1; + if (val[i] == 0 && val[i + 1] == 0) { + offset++; + } + check(indices[1], hint("b", val[i]), slice(bOrder, offset), case_); + } + check(indices[2], hint("c", INF), cOrder, "case6.16"); + } + /* Case 7: (-INF, y)*/ { + auto hint = [](const char* name, double value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + auto slice = [](decltype(aOrder) all, size_t end) { + return decltype(all){all.begin(), all.begin() + end}; + }; + check(indices[0], hint("a", 100), slice(aOrder, 10), "case7.1"); + check(indices[1], hint("b", -INF), {}, "case7.2"); + int64_t x; + ::memcpy(&x, &INF, 8); + // int64_t x = *reinterpret_cast(&INF); + x--; + double y; + ::memcpy(&y, &x, 8); + // double y = *reinterpret_cast(&x); + check(indices[1], hint("b", -y), slice(bOrder, 1), "case7.3"); + check(indices[2], hint("c", -INF), {}, "case7.4"); + check(indices[2], hint("c", -y), slice(cOrder, 2), "case7.5"); + } +} +TEST_F(IndexScanTest, Bool) { + auto rows = R"( + bool | bool + true | true + true | false + false | + false | false + true | + )"_row; + auto schema = R"( + a | bool | | + b | bool | | true + )"_schema; + auto indices = R"( + TAG(t,2) + (i1,2):a + 
(i2,3):b + )"_index(schema); + auto kv = encodeTag(rows, 2, schema, indices); + auto kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + InitContext initCtx; + initCtx.requiredColumns = {kVid}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [](auto... vidList) { + std::vector ret; + std::vector value; + (value.push_back(std::to_string(vidList)), ...); + for (auto& v : value) { + Row row; + row.emplace_back(v); + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: Prefix */ { + check(indices[0], {makeColumnHint("a", true)}, expect(0, 1, 4), "case1.1"); + check(indices[0], {makeColumnHint("a", false)}, expect(2, 3), "case1.2"); + check(indices[1], {makeColumnHint("b", true)}, expect(0), "case1.3"); + check(indices[1], {makeColumnHint("b", false)}, expect(1, 3), "case1.4"); + } + /* Case 2: [x,INF) */ { + check(indices[0], {makeBeginColumnHint("a", false)}, expect(2, 3, 0, 1, 4), "case2.1"); + check(indices[0], {makeBeginColumnHint("a", true)}, expect(0, 1, 4), "case2.2"); + check(indices[1], {makeBeginColumnHint("b", true)}, expect(0), "case2.3"); + } +} +TEST_F(IndexScanTest, String1) { + /** + * data and query both without truncate + * That means ScanNode only access Index Key-Values + */ + auto rows = + " string | string | string | int \n" + " 
123456789 | abcdefghi | \xFF\xFF\xFF\xFF\xFF\xFF\xFF | 0 \n" + " 123456789 | | | 1 \n" + " 12345678 | | \x01 | 2 \n" + " 123456788 | \xFF\xFF | | 3 \n" + " 12345678: | aacd | \xFF\xFF\xFF\xFF\xFF\xFF\xFE | 4 \n" + " a1234 | accd | \x00\x01 | 5 \n" + " | | | 6 \n" + ""_row; + // 0 1 2 3 4 5 6 7 + std::vector a = {6, 2, 3, 0, 1, 4, 5}; + std::vector b = {6, 4, 0, 5, 3}; + std::vector c = {1, 3, 5, 2, 4, 0}; + + auto schema = R"( + a | string | 10 | false + b | string | 10 | true + c | string | 10 | true + )"_schema; + auto indices = R"( + TAG(t,1) + (ia,2): a(10) + (ib,3): b(10) + (ic,4): c(10) + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (size_t i = 0; i < kv.size(); i++) { + for (auto& item : kv[i]) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& acquiredColumns, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + helper.setFatal(scanNode.get(), true); + InitContext initCtx; + initCtx.requiredColumns.insert(acquiredColumns.begin(), acquiredColumns.end()); + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + std::vector result2(result.size()); + for (size_t j = 0; j < acquiredColumns.size(); j++) { + int p = initCtx.retColMap[acquiredColumns[j]]; + for (size_t i = 0; i < result.size(); i++) { + result2[i].emplace_back(result[i][p]); + } + } + result = result2; + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [&rows](const std::vector& vidList, const 
std::vector& columns) { + std::vector ret; + for (size_t i = 0; i < vidList.size(); i++) { + Row row; + row.emplace_back(Value(std::to_string(vidList[i]))); + for (size_t j = 0; j < columns.size(); j++) { + row.emplace_back(rows[vidList[i]][columns[j]]); + } + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: prefix */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeColumnHint(name, value)}; + }; + check(indices[0], hint("a", "123456789"), {kVid, "a"}, expect({0, 1}, {0}), "case1.1"); + check(indices[0], hint("a", "12345678"), {kVid, "a"}, expect({2}, {0}), "case1.2"); + check(indices[0], hint("a", ""), {kVid, "a"}, expect({6}, {0}), "case1.3"); + check(indices[1], hint("b", "\xFF\xFF"), {kVid, "b"}, expect({3}, {1}), "case1.4"); + check(indices[1], hint("b", ""), {kVid, "b"}, expect({6}, {1}), "case1.5"); + auto columnHint = hint("c", "\xFF\xFF\xFF\xFF\xFF\xFF\xFE"); + check(indices[2], columnHint, {kVid, "c"}, expect({4}, {2}), "case1.6"); + } + + /* Case 2: [x, INF)*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + auto slice = [](decltype(a) all, int begin) { + return decltype(all){all.begin() + begin, all.end()}; + }; + check(indices[0], hint("a", "12345678"), {kVid, "a"}, expect(slice(a, 1), {0}), "case2.1"); + check(indices[0], hint("a", "123456780"), {kVid, "a"}, expect(slice(a, 2), {0}), "case2.2"); + check(indices[0], hint("a", ""), {kVid, "a"}, expect(a, {0}), "case2.3"); + check(indices[1], hint("b", ""), {kVid, "b"}, expect(b, {1}), "case2.4"); + check(indices[1], hint("b", "abc"), {kVid, "b"}, expect(slice(b, 2), {1}), "case2.5"); + check(indices[1], hint("b", "aac"), {kVid, "b"}, expect(slice(b, 1), {1}), "case2.6"); + check(indices[1], hint("b", "aacd\x01"), {kVid, "b"}, expect(slice(b, 2), {1}), "case2.7"); + check(indices[1], hint("b", "\xFF\xFF"), {kVid, "b"}, expect(slice(b, 4), {1}), "case2.8"); + 
check(indices[1], hint("b", "\xFF\xFF\x01"), {kVid, "b"}, {}, "case2.9"); + check(indices[2], hint("c", ""), {kVid, "c"}, expect(c, {2}), "case2.10"); + check(indices[2], hint("c", "\x01"), {kVid, "c"}, expect(slice(c, 3), {2}), "case2.11"); + check(indices[2], + hint("c", "\xFF\xFF\xFF\xFF\xFF\xFF\xFF"), + {kVid, "c"}, + expect(slice(c, 5), {2}), + "case2.12"); + } + /* Case 3: (x,y) */ { + auto hint = [](const char* name, const std::string& begin, const std::string& end) { + return std::vector{makeColumnHint(name, begin, end)}; + }; + auto slice = [](decltype(a) all, int begin, int end) { + return decltype(all){all.begin() + begin, all.begin() + end}; + }; + auto columnHint = hint("a", "12345678", "123456789"); + check(indices[0], columnHint, {kVid, "a"}, expect(slice(a, 2, 3), {0}), "case3.1"); + check(indices[0], hint("a", "", "123456"), {kVid, "a"}, {}, "case3.2"); + check(indices[1], hint("b", "", "\xFF"), {kVid, "b"}, expect(slice(b, 1, 4), {1}), "case3.3"); + columnHint = hint("b", "aaccd", "\xFF\xFF"); + check(indices[1], columnHint, {kVid, "b"}, expect(slice(b, 1, 4), {1}), "case3.4"); + columnHint = hint("b", "\xFF", "\xFF\xFF\x01"); + check(indices[1], columnHint, {kVid, "b"}, expect(slice(b, 4, 5), {1}), "case3.5"); + check(indices[2], hint("c", "", "\x01"), {kVid, "c"}, expect(slice(c, 2, 3), {2}), "case3.6"); + columnHint = hint("c", "\x00\x00\x01"s, "\x01\x01"); + check(indices[2], columnHint, {kVid, "c"}, expect(slice(c, 2, 4), {2}), "case3.7"); + columnHint = hint("c", "\x00\x01"s, "\x01\x01"); + check(indices[2], columnHint, {kVid, "c"}, expect(slice(c, 3, 4), {2}), "case3.8"); + } + /* Case 4: (INF,y]*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + auto slice = [](decltype(a) all, int end) { + return decltype(all){all.begin(), all.begin() + end}; + }; + check(indices[0], hint("a", "123456789"), {kVid, "a"}, expect(slice(a, 5), {0}), "case4.1"); + check(indices[0], 
hint("a", ""), {kVid, "a"}, expect(slice(a, 1), {0}), "case4.2"); + check(indices[0], hint("a", "\xFF"), {kVid, "a"}, expect(slice(a, 7), {0}), "case4.3"); + check(indices[1], hint("b", "\xFF\xFF"), {kVid, "b"}, expect(slice(b, 5), {1}), "case4.4"); + check(indices[1], hint("b", "\xFF\xFE"), {kVid, "b"}, expect(slice(b, 4), {1}), "case4.5"); + check(indices[2], hint("c", "\x00\x00\x01"s), {kVid, "c"}, expect(slice(c, 2), {2}), "case4.6"); + check(indices[2], hint("c", "\x00\x01"s), {kVid, "c"}, expect(slice(c, 3), {2}), "case4.7"); + check(indices[2], hint("c", "\x01"), {kVid, "c"}, expect(slice(c, 4), {2}), "case4.8"); + auto columnHint = hint("c", "\xFF\xFF\xFF\xFF\xFF\xFF\xFF"); + check(indices[2], columnHint, {kVid, "c"}, expect(c, {2}), "case4.9"); + } +} +TEST_F(IndexScanTest, String2) { + /** + * data with truncate + * query without truncate + * That means ScanNode need to access base data only when require indexed column + */ + auto rows = + " string | string | string | int \n" + " 123456 | ABCDE2 | | 0 \n" + " 1234567 | ABCDE1 | \xFF\xFF\xFF\xFF\xFF | 1 \n" + " 1234567 | ABCDE | \xFF\xFF\xFF\xFF\xFF\x00\x01 | 2 \n" + " 123457 | ABCDF | \xFF\xFF\xFF\xFF\xFF | 3 \n" + ""_row; + auto schema = R"( + c1 | string | | false + c2 | string | | true + c3 | string | | true + )"_schema; + auto indices = R"( + TAG(t,1) + (i1,2):c1(5) + (i2,3):c2(5) + (i3,4):c3(5) + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (size_t i = 0; i < kv.size(); i++) { + for (auto& item : kv[i]) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& acquiredColumns, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + 
helper.setTag(scanNode.get(), schema); + helper.setFatal(scanNode.get(), true); + InitContext initCtx; + initCtx.requiredColumns.insert(acquiredColumns.begin(), acquiredColumns.end()); + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + std::vector result2(result.size()); + for (size_t j = 0; j < acquiredColumns.size(); j++) { + int p = initCtx.retColMap[acquiredColumns[j]]; + for (size_t i = 0; i < result.size(); i++) { + result2[i].emplace_back(result[i][p]); + } + } + result = result2; + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [&rows](const std::vector& vidList, const std::vector& columns) { + std::vector ret; + for (size_t i = 0; i < vidList.size(); i++) { + Row row; + row.emplace_back(Value(std::to_string(vidList[i]))); + for (size_t j = 0; j < columns.size(); j++) { + row.emplace_back(rows[vidList[i]][columns[j]]); + } + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: Prefix */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeColumnHint(name, value)}; + }; + check(indices[0], hint("c1", "1234"), {kVid, "c1"}, {}, "case1.1"); + check(indices[0], hint("c1", "12345"), {kVid, "c1"}, {}, "case1.2"); + check(indices[1], hint("c2", "ABCDE"), {kVid, "c2"}, expect({2}, {1}), "case1.3"); + check(indices[2], + hint("c3", "\xFF\xFF\xFF\xFF\xFF"), + {kVid, "c3"}, + expect({1, 3}, {2}), + "case1.4"); + } + /* Case 2: (x, INF)*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + check(indices[0], hint("c1", "12345"), {kVid, "c1"}, expect({0, 1, 2, 3}, {0}), "case2.1"); + check(indices[1], hint("c2", "ABCDE"), {kVid, "c2"}, expect({0, 1, 3}, {1}), "case2.2"); + check( + indices[2], hint("c3", "\xFF\xFF\xFF\xFF\xFF"), 
{kVid, "c3"}, expect({2}, {2}), "case2.3"); + } + /* Case 3: [x, y] */ { + auto hint = [](const char* name, const std::string& begin, const std::string& end) { + return std::vector{makeColumnHint(name, begin, end)}; + }; + auto columnHint = hint("c1", "12345", "12346"); + check(indices[0], columnHint, {kVid, "c1"}, expect({0, 1, 2, 3}, {0}), "case3.1"); + columnHint = hint("c1", "12345", "12345"); + check(indices[0], columnHint, {kVid, "c1"}, {}, "case3.2"); + columnHint = hint("c2", "ABCDE", "ABCDF"); + check(indices[1], columnHint, {kVid, "c2"}, expect({0, 1, 2, 3}, {1}), "case3.3"); + columnHint = hint("c2", "ABCDE", "ABCDE"); + check(indices[1], columnHint, {kVid, "c2"}, expect({2}, {1}), "case3.4"); + columnHint = hint("c3", "\xFF\xFF\xFF\xFF\xFF", "\xFF\xFF\xFF\xFF\xFF"); + check(indices[2], columnHint, {kVid, "c3"}, expect({1, 3}, {2}), "case3.5"); + } + /* Case 4: (INF,y)*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + auto columnHint = hint("c1", "12345"); + check(indices[0], columnHint, {kVid, "c1"}, {}, "case4.1"); + columnHint = hint("c2", "ABCDE"); + check(indices[1], columnHint, {kVid, "c2"}, {}, "case4.2"); + columnHint = hint("c2", "ABCDF"); + check(indices[1], columnHint, {kVid, "c2"}, expect({0, 1, 2}, {1}), "case4.3"); + columnHint = hint("c3", " \xFF\xFF\xFF\xFF\xFF"); + check(indices[2], columnHint, {kVid, "c3"}, {}, "case4.4"); + } +} +TEST_F(IndexScanTest, String3) { + /** + * data without truncate + * query with truncate + * That means ScanNode only access Index Key-Values + */ + auto rows = + " string | string | string | int \n" + " abcde | 98765 | | 0 \n" + " abcda | 12345 | \xFF\xFF\xFF\xFF\xFF | 1 \n" + " abcda | 98766 | | 2 \n" + " | | | 3 \n" + ""_row; + auto schema = R"( + a | string | | false + b | string | | true + c | string | | true + )"_schema; + auto indices = R"( + TAG(t,0) + (ia,1): a(6) + (ib,2): b(6) + (ic,3): c(6) + )"_index(schema); + auto kv = 
encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (size_t i = 0; i < kv.size(); i++) { + for (auto& item : kv[i]) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& acquiredColumns, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + helper.setFatal(scanNode.get(), true); + InitContext initCtx; + initCtx.requiredColumns.insert(acquiredColumns.begin(), acquiredColumns.end()); + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + std::vector result2(result.size()); + for (size_t j = 0; j < acquiredColumns.size(); j++) { + int p = initCtx.retColMap[acquiredColumns[j]]; + for (size_t i = 0; i < result.size(); i++) { + result2[i].emplace_back(result[i][p]); + } + } + result = result2; + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [&rows](const std::vector& vidList, const std::vector& columns) { + std::vector ret; + for (size_t i = 0; i < vidList.size(); i++) { + Row row; + row.emplace_back(Value(std::to_string(vidList[i]))); + for (size_t j = 0; j < columns.size(); j++) { + row.emplace_back(rows[vidList[i]][columns[j]]); + } + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: Prefix */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcde "), {kVid, "a"}, {}, "case1.1"); + check(indices[2], hint("c", "\xFF\xFF\xFF\xFF\xFF\xFF"), {kVid, "c"}, {}, "case1.2"); + } + /* Case 2: 
[x, INF)*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcdef"), {kVid, "a"}, {}, "case2.1"); + check(indices[0], hint("a", "abcda "), {kVid, "a"}, expect({0}, {0}), "case2.2"); + check(indices[1], hint("b", "987654 "), {kVid, "b"}, expect({2}, {1}), "case2.3"); + check(indices[2], hint("c", "\xFF\xFF\xFF\xFF\xFF\xFF"), {kVid, "c"}, {}, "case2.4"); + } + /* Case 3: (x, y]*/ { + auto hint = [](const char* name, const std::string& begin, const std::string& end) { + return std::vector{makeColumnHint(name, begin, end)}; + }; + auto columnHint = hint("a", "abcda ", "abcde "); + check(indices[0], columnHint, {kVid, "a"}, expect({0}, {0}), "case3.1"); + columnHint = hint("b", "98765 ", "98766 "); + check(indices[1], columnHint, {kVid, "b"}, expect({2}, {1}), "case3.2"); + columnHint = hint("c", "\xFF\xFF\xFF\xFF\xFE ", "\xFF\xFF\xFF\xFF\xFF "); + check(indices[2], columnHint, {kVid, "c"}, expect({1}, {2}), "case3.3"); + } + /* Case 4: (INF,y)*/ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcde "), {kVid, "a"}, expect({3, 1, 2, 0}, {0}), "case4.1"); + check(indices[1], hint("b", "98764 "), {kVid, "b"}, expect({1}, {1}), "case4.2"); + check(indices[2], + hint("c", "\xFF\xFF\xFF\xFF\xFF "), + {kVid, "c"}, + expect({2, 3, 1}, {2}), + "case4.3"); + } +} +TEST_F(IndexScanTest, String4) { + /** + * data with truncate + * query with truncate + * That means ScanNode always need to access base data. 
+ */ + auto rows = + " string | string | int \n" + " abcde1 | 987654 | 0 \n" + " abcdd | 98765 | 1 \n" + " abcdf | 12345 | 2 \n" + " abcde | \xFF\xFF\xFF\xFF\xFF\xFF | 3 \n" + " abcde12 | | 4 \n" + " abcde123 | \xFF\xFF\xFF\xFF\xFF | 5 \n" + " abcde1234 | \xFF\xFF\xFF\xFF\xFF\xFF\x01| 6 \n" + " abcde1234 | | 7 \n" + ""_row; + auto schema = R"( + a | string | | false + b | string | | true + )"_schema; + auto indices = R"( + TAG(t,0) + (ia,1): a(5) + (ib,2): b(5) + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + auto kvstore = std::make_unique(); + for (size_t i = 0; i < kv.size(); i++) { + for (auto& item : kv[i]) { + kvstore->put(item.first, item.second); + } + } + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& acquiredColumns, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + helper.setFatal(scanNode.get(), true); + InitContext initCtx; + initCtx.requiredColumns.insert(acquiredColumns.begin(), acquiredColumns.end()); + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while (true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + std::vector result2(result.size()); + for (size_t j = 0; j < acquiredColumns.size(); j++) { + int p = initCtx.retColMap[acquiredColumns[j]]; + for (size_t i = 0; i < result.size(); i++) { + result2[i].emplace_back(result[i][p]); + } + } + result = result2; + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto expect = [&rows](const std::vector& vidList, const std::vector& columns) { + std::vector ret; + for (size_t i = 0; i < vidList.size(); i++) { + Row row; + 
row.emplace_back(Value(std::to_string(vidList[i]))); + for (size_t j = 0; j < columns.size(); j++) { + row.emplace_back(rows[vidList[i]][columns[j]]); + } + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: Prefix */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcde"), {kVid, "a"}, expect({3}, {0}), "case1.1"); + check(indices[0], hint("a", "abcde1234"), {kVid, "a"}, expect({6, 7}, {0}), "case1.2"); + check(indices[0], hint("a", "abcde2"), {kVid, "a"}, {}, "case1.3"); + check(indices[1], hint("b", "\xFF\xFF\xFF\xFF\xFF"), {kVid, "b"}, expect({5}, {1}), "case1.4"); + } + /* Case 2: (x, INF) */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeBeginColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcde"), {kVid, "a"}, expect({0, 4, 5, 6, 7, 2}, {0}), "case2.1"); + check(indices[0], hint("a", "abcde12"), {kVid, "a"}, expect({5, 6, 7, 2}, {0}), "case2.2"); + check(indices[0], hint("a", "abcde12345"), {kVid, "a"}, expect({2}, {0}), "case2.3"); + check( + indices[0], hint("a", "abcdd"), {kVid, "a"}, expect({0, 3, 4, 5, 6, 7, 2}, {0}), "case2.4"); + auto columnHint = hint("b", "\xFF\xFF\xFF\xFF\xFF"); + check(indices[1], columnHint, {kVid, "b"}, expect({3, 6}, {1}), "case2.5"); + columnHint = hint("b", "\xFF\xFF\xFF\xFF\xFF\x01"); + check(indices[1], columnHint, {kVid, "b"}, expect({3, 6}, {1}), "case2.6"); + columnHint = hint("b", "\xFF\xFF\xFF\xFF\xFF\xFF"); + check(indices[1], columnHint, {kVid, "b"}, expect({6}, {1}), "case2.7"); + } + /* Case 3: [x,y) */ { + auto hint = [](const char* name, const std::string& begin, const std::string& end) { + return std::vector{makeColumnHint(name, begin, end)}; + }; + auto columnHint = hint("a", "abcdd123", "abcde1234"); + check(indices[0], columnHint, {kVid, "a"}, expect({0, 3, 4, 5}, {0}), "case3.1"); + columnHint = hint("a", "abcde1", "abcdf"); + 
check(indices[0], columnHint, {kVid, "a"}, expect({0, 4, 5, 6, 7}, {0}), "case3.2"); + columnHint = hint("a", "abcde12345", "abcde123456"); + check(indices[0], columnHint, {kVid, "a"}, {}, "case3.3"); + columnHint = hint("a", "abcde1234", "abcde12345"); + check(indices[0], columnHint, {kVid, "a"}, expect({6, 7}, {0}), "case3.4"); + columnHint = hint("b", "\xFF\xFF\xFF\xFF\xFF", "\xFF\xFF\xFF\xFF\xFF\x00\x01"s); + check(indices[1], columnHint, {kVid, "b"}, expect({5}, {1}), "case3.5"); + columnHint = hint("b", "\xFF\xFF\xFF\xFF\xFF\x01", "\xFF\xFF\xFF\xFF\xFF\xFF\xFF"); + check(indices[1], columnHint, {kVid, "b"}, expect({3, 6}, {1}), "case3.6"); + } + /* Case 4: (INF,y] */ { + auto hint = [](const char* name, const std::string& value) { + return std::vector{makeEndColumnHint(name, value)}; + }; + check(indices[0], hint("a", "abcde123"), {kVid, "a"}, expect({1, 0, 3, 4, 5}, {0}), "case4.1"); + check(indices[0], hint("a", "abcde"), {kVid, "a"}, expect({1, 3}, {0}), "case4.2"); + check(indices[0], + hint("a", "abcde1234"), + {kVid, "a"}, + expect({1, 0, 3, 4, 5, 6, 7}, {0}), + "case4.3"); + check(indices[1], + hint("b", "\xFF\xFF\xFF\xFF\xFF"), + {kVid, "b"}, + expect({2, 0, 1, 5}, {1}), + "case4.4"); + check(indices[1], + hint("b", "\xFF\xFF\xFF\xFF\xFF\xFF"), + {kVid, "b"}, + expect({2, 0, 1, 3, 5}, {1}), + "case4.5"); + } +} +TEST_F(IndexScanTest, Nullable) { + std::shared_ptr schema; + auto kvstore = std::make_unique(); + auto check = [&](std::shared_ptr index, + const std::vector& columnHints, + const std::vector& expect, + const std::string& case_) { + auto context = makeContext(1, 0); + auto scanNode = + std::make_unique(context.get(), 0, columnHints, kvstore.get()); + IndexScanTestHelper helper; + helper.setIndex(scanNode.get(), index); + helper.setTag(scanNode.get(), schema); + helper.setFatal(scanNode.get(), true); + InitContext initCtx; + initCtx.requiredColumns = {kVid}; + scanNode->init(initCtx); + scanNode->execute(0); + + std::vector result; + while 
(true) { + auto res = scanNode->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + EXPECT_EQ(result, expect) << "Fail at case " << case_; + }; + auto hint = [](const std::string& name) { + return std::vector{makeColumnHint(name, Value::kNullValue)}; + }; + auto expect = [](auto... vidList) { + std::vector ret; + std::vector value; + (value.push_back(std::to_string(vidList)), ...); + for (auto& v : value) { + Row row; + row.emplace_back(v); + ret.emplace_back(std::move(row)); + } + return ret; + }; + /* Case 1: Int*/ { + auto rows = R"( + int | int + 0 | 0 + 9223372036854775807 | + 9223372036854775807 | + -9223372036854775807 | 9223372036854775807 + )"_row; + schema = R"( + a | int | | false + b | int | | true + )"_schema; + auto indices = R"( + TAG(t,1) + (ia,2):a + (ib,3):b + (iba,4):b,a + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + check(indices[0], hint("a"), {}, "case1.1"); + check(indices[1], hint("b"), expect(1, 2), "case1.2"); + check(indices[2], hint("b"), expect(1, 2), "case1.3"); + } + /* Case 2: Float */ { + auto rows = R"( + float | float + 1.7976931348623157e+308 | + 0 | + | + | <-NaN> + )"_row; + schema = R"( + a | double | | false + b | double | | true + )"_schema; + auto indices = R"( + TAG(t,1) + (ia,2):a + (ib,3):b + (iba,4):b,a + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + check(indices[0], hint("a"), {}, "case2.1"); + check(indices[1], hint("b"), expect(0, 2), "case2.2"); + check(indices[2], hint("b"), expect(0, 2), "case2.3"); + } + /* Case 3: String */ { + auto rows = R"( + string | string + \xFF\xFF\xFF | + 123 | 456 + \xFF\xFF\x01 | \xFF\xFF\xFF + 
\xFF\xFF\x01 | + )"_row; + schema = R"( + a | string | | false + b | string | | true + )"_schema; + auto indices = R"( + TAG(t,1) + (ia,2):a(3) + (ib,3):b(3) + (iba,4):b(3),a(3) + )"_index(schema); + auto kv = encodeTag(rows, 1, schema, indices); + kvstore = std::make_unique(); + for (auto& iter : kv) { + for (auto& item : iter) { + kvstore->put(item.first, item.second); + } + } + check(indices[0], hint("a"), {}, "case3.1"); + check(indices[1], hint("b"), expect(0, 3), "case3.2"); + check(indices[2], hint("b"), expect(0, 3), "case3.3"); + } +} +TEST_F(IndexScanTest, TTL) { + // TODO(hs.zhang): add unittest +} +TEST_F(IndexScanTest, Time) { + // TODO(hs.zhang): add unittest +} +TEST_F(IndexScanTest, Date) { + // TODO(hs.zhang): add unittest +} +TEST_F(IndexScanTest, DateTime) { + // TODO(hs.zhang): add unittest +} +TEST_F(IndexScanTest, Compound) { + // TODO(hs.zhang): add unittest +} + +class IndexTest : public ::testing::Test { + protected: + static PlanContext* getPlanContext() { + static std::unique_ptr ctx = std::make_unique(nullptr, 0, 8, false); + return ctx.get(); + } + static std::unique_ptr makeContext() { + auto ctx = std::make_unique(getPlanContext()); + ctx->tagId_ = 0; + ctx->edgeType_ = 0; + return ctx; + } + static std::vector collectResult(IndexNode* node) { + std::vector result; + InitContext initCtx; + node->init(initCtx); + while (true) { + auto res = node->next(); + ASSERT(res.success()); + if (!res.hasData()) { + break; + } + result.emplace_back(std::move(res).row()); + } + return result; + } + static std::vector pick(const std::vector& rows, const std::vector& indices) { + std::vector ret; + for (auto i : indices) { + ret.push_back(rows[i]); + } + return ret; + } + ::nebula::ObjectPool pool; +}; + +TEST_F(IndexTest, Selection) { + const auto rows = R"( + int | int + 1 | 2 + | + 8 | 10 + 8 | 10 + )"_row; + size_t currentOffset = 0; + auto ctx = makeContext(); + auto expr = RelationalExpression::makeGE(&pool, + TagPropertyExpression::make(&pool, 
"", "a"), + ConstantExpression::make(&pool, Value(5))); + + auto selection = std::make_unique(ctx.get(), expr); + auto mockChild = std::make_unique(ctx.get()); + mockChild->executeFunc = [](PartitionID) { return ::nebula::cpp2::ErrorCode::SUCCEEDED; }; + mockChild->nextFunc = [&rows, ¤tOffset]() -> IndexNode::Result { + if (currentOffset < rows.size()) { + auto row = rows[currentOffset++]; + return IndexNode::Result(std::move(row)); + } else { + return IndexNode::Result(); + } + }; + mockChild->initFunc = [](InitContext& initCtx) -> ::nebula::cpp2::ErrorCode { + initCtx.returnColumns = {"a", "b"}; + initCtx.retColMap = {{"a", 0}, {"b", 1}}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + selection->addChild(std::move(mockChild)); + ASSERT_EQ(collectResult(selection.get()), pick(rows, {2, 3})); +} +TEST_F(IndexTest, Projection) { + const auto rows = R"( + int | int | int + 1 | 2 | 3 + 4 | 5 | 6 + 7 | 8 |9 + )"_row; + size_t currentOffset = 0; + auto ctx = makeContext(); + auto projection = + std::make_unique(ctx.get(), std::vector{"c", "a", "b"}); + auto mockChild = std::make_unique(ctx.get()); + mockChild->executeFunc = [](PartitionID) { return ::nebula::cpp2::ErrorCode::SUCCEEDED; }; + mockChild->nextFunc = [&rows, ¤tOffset]() -> IndexNode::Result { + if (currentOffset < rows.size()) { + auto row = rows[currentOffset++]; + return IndexNode::Result(std::move(row)); + } else { + return IndexNode::Result(); + } + }; + mockChild->initFunc = [](InitContext& initCtx) -> ::nebula::cpp2::ErrorCode { + initCtx.returnColumns = {"a", "b", "c"}; + initCtx.retColMap = {{"a", 0}, {"b", 1}, {"c", 2}}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + projection->addChild(std::move(mockChild)); + auto expect = R"( + int | int | int + 3 | 1 | 2 + 6 | 4 | 5 + 9 | 7 | 8 + )"_row; + ASSERT_EQ(collectResult(projection.get()), expect); +} +TEST_F(IndexTest, Limit) { + auto genRows = [](int start, int end) { + std::vector ret; + for (int i = start; i < end; i++) { + Row row; + 
row.emplace_back(Value(i)); + row.emplace_back(Value(i * i)); + row.emplace_back(Value(i * i * i)); + ret.emplace_back(std::move(row)); + } + return ret; + }; + auto rows = genRows(0, 1000); + size_t currentOffset = 0; + auto ctx = makeContext(); + auto limit = std::make_unique(ctx.get(), 10); + auto mockChild = std::make_unique(ctx.get()); + mockChild->executeFunc = [](PartitionID) { return ::nebula::cpp2::ErrorCode::SUCCEEDED; }; + mockChild->nextFunc = [&rows, ¤tOffset]() -> IndexNode::Result { + if (currentOffset < rows.size()) { + auto row = rows[currentOffset++]; + return IndexNode::Result(std::move(row)); + } else { + return IndexNode::Result(); + } + }; + mockChild->initFunc = [](InitContext&) -> ::nebula::cpp2::ErrorCode { + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + limit->addChild(std::move(mockChild)); + ASSERT_EQ(collectResult(limit.get()), genRows(0, 10)); +} +TEST_F(IndexTest, Dedup) { + auto rows1 = R"( + int | int + 1 | 2 + 1 | 3 + 2 | 2 + )"_row; + auto rows2 = R"( + int | int + 1 | 4 + 2 | 3 + 1 | 5 + 3 | 6 + )"_row; + size_t offset1 = 0, offset2 = 0; + auto ctx = makeContext(); + auto dedup = std::make_unique(ctx.get(), std::vector{"a"}); + auto child1 = std::make_unique(ctx.get()); + child1->executeFunc = [](PartitionID) { return ::nebula::cpp2::ErrorCode::SUCCEEDED; }; + child1->nextFunc = [&rows1, &offset1]() -> IndexNode::Result { + if (offset1 < rows1.size()) { + auto row = rows1[offset1++]; + return IndexNode::Result(std::move(row)); + } else { + return IndexNode::Result(); + } + }; + child1->initFunc = [](InitContext& initCtx) -> ::nebula::cpp2::ErrorCode { + initCtx.returnColumns = {"a", "b"}; + initCtx.retColMap = {{"a", 0}, {"b", 1}}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + auto child2 = std::make_unique(ctx.get()); + child2->executeFunc = [](PartitionID) { return ::nebula::cpp2::ErrorCode::SUCCEEDED; }; + child2->nextFunc = [&rows2, &offset2]() -> IndexNode::Result { + if (offset2 < rows2.size()) { + auto row = 
rows2[offset2++]; + return IndexNode::Result(std::move(row)); + } else { + return IndexNode::Result(); + } + }; + child2->initFunc = [](InitContext& initCtx) -> ::nebula::cpp2::ErrorCode { + initCtx.returnColumns = {"a", "b"}; + initCtx.retColMap = {{"a", 0}, {"b", 1}}; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + dedup->addChild(std::move(child1)); + dedup->addChild(std::move(child2)); + auto expect = R"( + int | int + 1 | 2 + 2 | 2 + 3 | 6 + )"_row; + ASSERT_EQ(collectResult(dedup.get()), expect); +} +} // namespace storage +} // namespace nebula +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + folly::init(&argc, &argv, true); + google::SetStderrLogging(google::INFO); + return RUN_ALL_TESTS(); +} diff --git a/src/storage/test/IndexTestUtil.h b/src/storage/test/IndexTestUtil.h new file mode 100644 index 00000000000..73e457e6167 --- /dev/null +++ b/src/storage/test/IndexTestUtil.h @@ -0,0 +1,611 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#pragma once +#include +#include +#include +#include +#include +#include + +#include "common/datatypes/DataSet.h" +#include "common/meta/NebulaSchemaProvider.h" +#include "folly/Conv.h" +#include "folly/String.h" +#include "kvstore/KVIterator.h" +#include "kvstore/KVStore.h" +#include "storage/exec/IndexNode.h" +namespace nebula { +namespace storage { +using ::nebula::kvstore::KVIterator; +class MockKVIterator : public KVIterator { + using KVMap = std::map; + + public: + MockKVIterator(const KVMap& kv, KVMap::iterator&& iter) : kv_(kv), iter_(std::move(iter)) {} + bool valid() const { return iter_ != kv_.end() && validFunc_(iter_); } + void next() { iter_++; } + void prev() { iter_--; } + folly::StringPiece key() const { return folly::StringPiece(iter_->first); } + folly::StringPiece val() const { return folly::StringPiece(iter_->second); } + void setValidFunc(const std::function validFunc) { + validFunc_ = validFunc; + } + + private: + const KVMap& kv_; + KVMap::iterator iter_; + std::function validFunc_; +}; +class MockKVStore : public ::nebula::kvstore::KVStore { + private: + GraphSpaceID spaceId_{0}; + std::map kv_; + + public: + MockKVStore() {} + // Return bit-OR of StoreCapability values; + uint32_t capability() const override { + CHECK(false); + return 0; + }; + void stop() override {} + ErrorOr partLeader(GraphSpaceID spaceId, + PartitionID partID) override { + UNUSED(spaceId), UNUSED(partID); + CHECK(false); + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + // Read a single key + nebula::cpp2::ErrorCode get(GraphSpaceID spaceId, + PartitionID partId, + const std::string& key, + std::string* value, + bool canReadFromFollower = false) override { + UNUSED(canReadFromFollower); + UNUSED(partId); + CHECK_EQ(spaceId, spaceId_); + auto iter = kv_.lower_bound(key); + if (iter != kv_.end() && iter->first == key) { + *value = iter->second; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } else { + return ::nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; + } + } 
+ + // Read multiple keys, if error occurs a cpp2::ErrorCode is returned, + // If key[i] does not exist, the i-th value in return value would be + // Status::KeyNotFound + std::pair> multiGet( + GraphSpaceID spaceId, + PartitionID partId, + const std::vector& keys, + std::vector* values, + bool canReadFromFollower = false) override { + UNUSED(canReadFromFollower); + UNUSED(spaceId); + UNUSED(partId); + std::vector status; + nebula::cpp2::ErrorCode ret = nebula::cpp2::ErrorCode::SUCCEEDED; + for (auto& key : keys) { + auto iter = kv_.lower_bound(key); + if (iter != kv_.end() && iter->first == key) { + values->push_back(iter->second); + status.push_back(Status::OK()); + } else { + values->push_back(""); + status.push_back(Status::KeyNotFound()); + ret = nebula::cpp2::ErrorCode::E_PARTIAL_RESULT; + } + } + return {ret, std::move(status)}; + } + + // Get all results in range [start, end) + nebula::cpp2::ErrorCode range(GraphSpaceID spaceId, + PartitionID partId, + const std::string& start, + const std::string& end, + std::unique_ptr* iter, + bool canReadFromFollower = false) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(canReadFromFollower); + CHECK_EQ(spaceId, spaceId_); + std::unique_ptr mockIter; + mockIter = std::make_unique(kv_, kv_.lower_bound(start)); + mockIter->setValidFunc([end](const decltype(kv_)::iterator& it) { return it->first < end; }); + (*iter) = std::move(mockIter); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + // virtual nebula::cpp2::ErrorCode prefix(GraphSpaceID spaceId, + // PartitionID partId, + // std::string&& prefix, + // std::unique_ptr* iter, + // bool canReadFromFollower = false) = delete override; + // virtual nebula::cpp2::ErrorCode rangeWithPrefix(GraphSpaceID spaceId, + // PartitionID partId, + // std::string&& start, + // std::string&& prefix, + // std::unique_ptr* iter, + // bool canReadFromFollower = false) = delete; + // virtual nebula::cpp2::ErrorCode range(GraphSpaceID spaceId, + // PartitionID partId, + // 
std::string&& start, + // std::string&& end, + // std::unique_ptr* iter, + // bool canReadFromFollower = false) = delete; + nebula::cpp2::ErrorCode prefix(GraphSpaceID spaceId, + PartitionID partId, + const std::string& prefix, + std::unique_ptr* iter, + bool canReadFromFollower = false) override { + UNUSED(canReadFromFollower); + UNUSED(spaceId); + UNUSED(partId); + CHECK_EQ(spaceId, spaceId_); + auto mockIter = std::make_unique(kv_, kv_.lower_bound(prefix)); + mockIter->setValidFunc([prefix](const decltype(kv_)::iterator& it) { + if (it->first.size() < prefix.size()) { + return false; + } + for (size_t i = 0; i < prefix.size(); i++) { + if (prefix[i] != it->first[i]) { + return false; + } + } + return true; + }); + (*iter) = std::move(mockIter); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + // Get all results with prefix starting from start + nebula::cpp2::ErrorCode rangeWithPrefix(GraphSpaceID spaceId, + PartitionID partId, + const std::string& start, + const std::string& prefix, + std::unique_ptr* iter, + bool canReadFromFollower = false) override { + UNUSED(canReadFromFollower); + UNUSED(spaceId); + UNUSED(partId); + CHECK_EQ(spaceId, spaceId_); + auto mockIter = std::make_unique(kv_, kv_.lower_bound(start)); + mockIter->setValidFunc([prefix](const decltype(kv_)::iterator& it) { + if (it->first.size() < prefix.size()) { + return false; + } + for (size_t i = 0; i < prefix.size(); i++) { + if (prefix[i] != it->first[i]) { + return false; + } + } + return true; + }); + (*iter) = std::move(mockIter); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + nebula::cpp2::ErrorCode sync(GraphSpaceID spaceId, PartitionID partId) override { + UNUSED(spaceId); + UNUSED(partId); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + void asyncMultiPut(GraphSpaceID spaceId, + PartitionID partId, + std::vector<::nebula::kvstore::KV>&& keyValues, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + 
UNUSED(cb); + for (size_t i = 0; i < keyValues.size(); i++) { + kv_.emplace(std::move(keyValues[i])); + } + } + + // Asynchronous version of remove methods + void asyncRemove(GraphSpaceID spaceId, + PartitionID partId, + const std::string& key, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(cb); + kv_.erase(key); + } + + void asyncMultiRemove(GraphSpaceID spaceId, + PartitionID partId, + std::vector&& keys, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(cb); + for (size_t i = 0; i < keys.size(); i++) { + kv_.erase(keys[i]); + } + } + + void asyncRemoveRange(GraphSpaceID spaceId, + PartitionID partId, + const std::string& start, + const std::string& end, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(cb); + for (auto iter = kv_.lower_bound(start); iter != kv_.end();) { + if (iter->first < end) { + iter = kv_.erase(iter); + } else { + iter++; + } + } + } + + void asyncAtomicOp(GraphSpaceID spaceId, + PartitionID partId, + raftex::AtomicOp op, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(cb); + UNUSED(op); + LOG(FATAL) << "Unexpect"; + } + void asyncAppendBatch(GraphSpaceID spaceId, + PartitionID partId, + std::string&& batch, + ::nebula::kvstore::KVCallback cb) override { + UNUSED(spaceId); + UNUSED(partId); + UNUSED(cb); + LOG(FATAL) << "Unexpect " << batch; + } + nebula::cpp2::ErrorCode ingest(GraphSpaceID spaceId) override { + UNUSED(spaceId); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + int32_t allLeader( + std::unordered_map>& leaderIds) override { + UNUSED(leaderIds); + + LOG(FATAL) << "Unexpect"; + return 0; + } + + ErrorOr> part( + GraphSpaceID spaceId, PartitionID partId) override { + UNUSED(spaceId); + UNUSED(partId); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + nebula::cpp2::ErrorCode 
compact(GraphSpaceID spaceId) override { + UNUSED(spaceId); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + nebula::cpp2::ErrorCode flush(GraphSpaceID spaceId) override { + UNUSED(spaceId); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + ErrorOr> createCheckpoint( + GraphSpaceID spaceId, const std::string& name) override { + UNUSED(spaceId); + UNUSED(name); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + }; + nebula::cpp2::ErrorCode dropCheckpoint(GraphSpaceID spaceId, const std::string& name) override { + UNUSED(spaceId); + UNUSED(name); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + nebula::cpp2::ErrorCode setWriteBlocking(GraphSpaceID spaceId, bool sign) override { + UNUSED(spaceId); + UNUSED(sign); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + + ErrorOr> backupTable( + GraphSpaceID spaceId, + const std::string& name, + const std::string& tablePrefix, + std::function filter) override { + UNUSED(spaceId); + UNUSED(name); + UNUSED(tablePrefix); + UNUSED(filter); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + // for meta BR + nebula::cpp2::ErrorCode restoreFromFiles(GraphSpaceID spaceId, + const std::vector& files) override { + UNUSED(spaceId); + UNUSED(files); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + nebula::cpp2::ErrorCode multiPutWithoutReplicator( + GraphSpaceID spaceId, std::vector<::nebula::kvstore::KV> keyValues) override { + UNUSED(spaceId); + UNUSED(keyValues); + LOG(FATAL) << "Unexpect"; + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + std::vector getDataRoot() const override { + LOG(FATAL) << "Unexpect"; + return {}; + } + + ErrorOr getProperty(GraphSpaceID spaceId, + const std::string& property) override { + UNUSED(spaceId); + UNUSED(property); + return ::nebula::cpp2::ErrorCode::SUCCEEDED; + } + void put(const 
std::string& key, const std::string& value) { kv_[key] = value; } + + private: + using ::nebula::kvstore::KVStore::prefix; + using ::nebula::kvstore::KVStore::range; + using ::nebula::kvstore::KVStore::rangeWithPrefix; +}; +class MockIndexNode : public IndexNode { + public: + explicit MockIndexNode(RuntimeContext* context) : IndexNode(context, "MockIndexNode") {} + ::nebula::cpp2::ErrorCode init(InitContext& initCtx) override { return initFunc(initCtx); } + std::unique_ptr copy() override { LOG(FATAL) << "Unexpect"; } + std::function nextFunc; + std::function<::nebula::cpp2::ErrorCode(PartitionID)> executeFunc; + std::function<::nebula::cpp2::ErrorCode(InitContext& initCtx)> initFunc; + std::string identify() override { return "MockIndexNode"; } + + private: + Result doNext() override { return nextFunc(); } + ::nebula::cpp2::ErrorCode doExecute(PartitionID partId) override { return executeFunc(partId); }; +}; + +class RowParser { + public: + explicit RowParser(const std::string& str) { + ss = std::stringstream(folly::trimWhitespace(folly::StringPiece(str)).toString()); + parseHeader(); + parseRow(); + } + void parseHeader() { + std::string line; + std::getline(ss, line); + std::vector types; + folly::split("|", line, types); + for (size_t i = 0; i < types.size(); i++) { + types[i] = folly::trimWhitespace(folly::StringPiece(types[i])).toString(); + } + typeList_ = std::move(types); + } + void parseRow() { + std::string line; + while (std::getline(ss, line)) { + std::vector values; + folly::split("|", line, values); + for (size_t i = 0; i < values.size(); i++) { + values[i] = folly::trimWhitespace(folly::StringPiece(values[i])).toString(); + } + Row row; + for (size_t i = 0; i < values.size(); i++) { + if (values[i] == "") { + row.emplace_back(Value::null()); + } else if (values[i] == "") { + row.emplace_back(std::numeric_limits::infinity()); + } else if (values[i] == "<-INF>") { + row.emplace_back(-std::numeric_limits::infinity()); + } else if (values[i] == "") { + 
row.emplace_back(std::numeric_limits::quiet_NaN()); + } else if (values[i] == "<-NaN>") { + row.emplace_back(-std::numeric_limits::quiet_NaN()); + } else { + row.emplace_back(transformMap[typeList_[i]](values[i])); + } + } + rowList_.emplace_back(std::move(row)); + } + } + const std::vector& getResult() { return rowList_; } + + private: + std::stringstream ss; + std::vector typeList_; + std::vector rowList_; + std::map> transformMap{ + {"int", [](const std::string& str) { return Value(std::stol(str)); }}, + {"string", [](const std::string& str) { return Value(str); }}, + {"float", [](const std::string& str) { return Value(folly::to(str)); }}, + {"bool", [](const std::string& str) { return Value(str == "true" ? true : false); }}}; +}; + +/** + * define a schema + * + * format: + * name | type | length | nullable + * example: + * std::string str=R"( + * a | int | | + * b | string | | true + * c | double | 10 | + * )"_schema + */ +class SchemaParser { + public: + explicit SchemaParser(const std::string& str) { + schema = std::make_shared<::nebula::meta::NebulaSchemaProvider>(0); + ss = std::stringstream(folly::trimWhitespace(folly::StringPiece(str)).toString()); + parse(); + } + void parse() { + std::string line; + while (std::getline(ss, line)) { + std::vector values; + folly::split("|", line, values); + std::string name = folly::trimWhitespace(folly::StringPiece(values[0])).toString(); + auto type = typeMap[folly::trimWhitespace(folly::StringPiece(values[1])).toString()]; + int length = 0; + { + std::string lenStr = folly::trimWhitespace(folly::StringPiece(values[2])).toString(); + if (lenStr != "") { + length = std::stoi(lenStr); + } + } + bool nullable = false; + { + std::string nullableStr = folly::trimWhitespace(folly::StringPiece(values[3])).toString(); + if (nullableStr == "true") { + nullable = true; + } + } + schema->addField(name, type, length, nullable); + } + } + std::shared_ptr<::nebula::meta::NebulaSchemaProvider> getResult() { return schema; } + + 
private: + std::stringstream ss; + std::shared_ptr<::nebula::meta::NebulaSchemaProvider> schema; + std::map typeMap{ + {"int", ::nebula::cpp2::PropertyType::INT64}, + {"double", ::nebula::cpp2::PropertyType::DOUBLE}, + {"string", ::nebula::cpp2::PropertyType::STRING}, + {"bool", ::nebula::cpp2::PropertyType::BOOL}}; +}; + +/** + * define index of a schema + * + * format: + * (Tag|Edge)(name,id) + * example + * std::string str=R"( + * Tag(name,id) + * (i1,1): a,b(10),c + * (i2,2): b(5),c + * )"_index(schema) + */ +class IndexParser { + public: + using IndexItem = ::nebula::meta::cpp2::IndexItem; + using SchemaProvider = ::nebula::meta::NebulaSchemaProvider; + explicit IndexParser(const std::string& str) { + ss = std::stringstream(folly::trimWhitespace(folly::StringPiece(str)).toString()); + parseSchema(); + } + void parseSchema() { + static std::regex pattern(R"((TAG|EDGE)\((.+),(\d+)\))"); + std::smatch match; + std::string line; + std::getline(ss, line); + CHECK(std::regex_match(line, match, pattern)); + std::string name = match.str(2); + int32_t id = std::stoi(match.str(3)); + schemaName_ = name; + if (match.str(1) == "TAG") { + schemaId_.set_tag_id(id); + } else { + schemaId_.set_edge_type(id); + } + } + std::vector> operator()(std::shared_ptr schema) { + schema_ = schema; + std::vector> ret; + std::string line; + while (std::getline(ss, line)) { + auto index = parse(folly::trimWhitespace(folly::StringPiece(line)).toString()); + ret.push_back(index); + } + return ret; + } + std::shared_ptr parse(const std::string& line) { + auto ret = std::make_shared(); + ret->set_schema_id(schemaId_); + ret->set_schema_name(schemaName_); + static std::regex pattern(R"(\((.+),(\d+)\):(.+))"); + std::smatch match; + CHECK(std::regex_match(line, match, pattern)); + ret->set_index_name(folly::trimWhitespace(folly::StringPiece(match.str(1)).toString())); + ret->set_index_id(std::stoi(match.str(2))); + std::string columnStr = match.str(3); + std::vector columns; + folly::split(",", 
columnStr, columns); + for (size_t i = 0; i < columns.size(); i++) { + columns[i] = folly::trimWhitespace(folly::StringPiece(columns[i])).toString(); + } + std::vector<::nebula::meta::cpp2::ColumnDef> fields; + for (auto& column : columns) { + std::string name; + int length; + std::smatch m; + std::regex p(R"((.+)\((\d+)\))"); + if (std::regex_match(column, m, p)) { + name = m.str(1); + length = std::stoi(m.str(2)); + } else { + name = column; + length = 0; + } + ::nebula::meta::cpp2::ColumnDef col; + auto field = schema_->field(name); + col.set_name(name); + ::nebula::meta::cpp2::ColumnTypeDef type; + if (length > 0) { + type.set_type_length(length); + type.set_type(::nebula::cpp2::PropertyType::FIXED_STRING); + } else { + type.set_type(field->type()); + } + col.set_type(type); + col.set_nullable(field->nullable()); + fields.emplace_back(std::move(col)); + } + ret->set_fields(fields); + return ret; + } + + private: + std::stringstream ss; + std::string schemaName_; + ::nebula::cpp2::SchemaID schemaId_; + std::shared_ptr schema_; + std::shared_ptr<::nebula::meta::cpp2::IndexItem> index_; +}; + +// Definition of UDL +std::vector operator""_row(const char* str, std::size_t len) { + auto ret = RowParser(std::string(str, len)).getResult(); + return ret; +} +std::shared_ptr<::nebula::meta::NebulaSchemaProvider> operator"" _schema(const char* str, + std::size_t) { + return SchemaParser(std::string(str)).getResult(); +} + +IndexParser operator"" _index(const char* str, std::size_t) { + return IndexParser(std::string(str)); +} + +} // namespace storage +} // namespace nebula diff --git a/src/storage/test/LookupIndexTest.cpp b/src/storage/test/LookupIndexTest.cpp index 2740e72d9bd..2c883faddff 100644 --- a/src/storage/test/LookupIndexTest.cpp +++ b/src/storage/test/LookupIndexTest.cpp @@ -9,6 +9,10 @@ #include "codec/RowWriterV2.h" #include "codec/test/RowWriterV1.h" #include "common/base/Base.h" +#include "common/expression/ConstantExpression.h" +#include 
"common/expression/LogicalExpression.h" +#include "common/expression/PropertyExpression.h" +#include "common/expression/RelationalExpression.h" #include "common/fs/TempDir.h" #include "common/utils/IndexKeyUtils.h" #include "interface/gen-cpp2/common_types.h" diff --git a/src/tools/db-dump/CMakeLists.txt b/src/tools/db-dump/CMakeLists.txt index b04c86360cc..62f272621a0 100644 --- a/src/tools/db-dump/CMakeLists.txt +++ b/src/tools/db-dump/CMakeLists.txt @@ -46,6 +46,7 @@ set(tools_test_deps $ $ $ + $ $ ) diff --git a/src/tools/meta-dump/CMakeLists.txt b/src/tools/meta-dump/CMakeLists.txt index 8ba15751e2b..c5fa9d565d8 100644 --- a/src/tools/meta-dump/CMakeLists.txt +++ b/src/tools/meta-dump/CMakeLists.txt @@ -51,6 +51,7 @@ nebula_add_executable( $ $ $ + $ $ LIBRARIES ${ROCKSDB_LIBRARIES} diff --git a/tests/tck/features/lookup/LookUp.feature b/tests/tck/features/lookup/LookUp.feature index 6afca5f523f..ce6700e6697 100644 --- a/tests/tck/features/lookup/LookUp.feature +++ b/tests/tck/features/lookup/LookUp.feature @@ -840,7 +840,6 @@ Feature: LookUpTest_Vid_String "104":("yyy", 28), "105":("zzz", 21), "106":("kkk", 21), - "121":("Useless", 60), "121":("Useless", 20); INSERT VERTEX team(name) From f682359f33f720b6a93f6da18e56f84602dc7559 Mon Sep 17 00:00:00 2001 From: Yichen Wang <18348405+Aiee@users.noreply.github.com> Date: Tue, 9 Nov 2021 18:01:42 +0800 Subject: [PATCH 06/53] Update nebula-python installation in the test (#3290) * Update nebula-python installation * Add --user to pip install --- tests/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index 3c2f38e75d5..eec798a81bc 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -33,8 +33,7 @@ install-deps: install-nebula-py: install-deps git clone --branch master https://github.com/vesoft-inc/nebula-python $(CURR_DIR)/nebula-python cd $(CURR_DIR)/nebula-python \ - && pip3 install --user -r requirements.txt -i $(PYPI_MIRROR) \ - && python3 setup.py 
install --user + && pip3 install --user . -i $(PYPI_MIRROR) rm -rf $(CURR_DIR)/nebula-python gherkin-fmt: install-deps From 1a4c4bd2c35d3626608dbbaf3adaa2546d0136e8 Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Thu, 11 Nov 2021 00:37:08 +0800 Subject: [PATCH 07/53] Remove outdated test (#3291) --- src/storage/test/IndexScanTest.cpp | 1360 ---------------------------- 1 file changed, 1360 deletions(-) delete mode 100644 src/storage/test/IndexScanTest.cpp diff --git a/src/storage/test/IndexScanTest.cpp b/src/storage/test/IndexScanTest.cpp deleted file mode 100644 index 59d5df1ef6f..00000000000 --- a/src/storage/test/IndexScanTest.cpp +++ /dev/null @@ -1,1360 +0,0 @@ -/* Copyright (c) 2018 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include -#include - -#include "codec/RowReader.h" -#include "common/base/Base.h" -#include "common/fs/TempDir.h" -#include "common/utils/NebulaKeyUtils.h" -#include "storage/index/LookUpEdgeIndexProcessor.h" -#include "storage/index/LookUpVertexIndexProcessor.h" -#include "storage/test/TestUtils.h" - -DECLARE_uint32(raft_heartbeat_interval_secs); - -namespace nebula { -namespace storage { -using IndexValues = std::vector>; - -std::string indexStr(RowReader* reader, const nebula::cpp2::ColumnDef& col) { - auto res = RowReader::getPropByName(reader, col.get_name()); - if (!ok(res)) { - LOG(ERROR) << "Skip bad column prop " << col.get_name(); - return ""; - } - auto&& v = value(std::move(res)); - switch (col.get_type().get_type()) { - case nebula::cpp2::SupportedType::BOOL: { - auto val = boost::get(v); - std::string raw; - raw.reserve(sizeof(bool)); - raw.append(reinterpret_cast(&val), sizeof(bool)); - return raw; - } - case nebula::cpp2::SupportedType::INT: - case nebula::cpp2::SupportedType::TIMESTAMP: { - return NebulaKeyUtils::encodeInt64(boost::get(v)); - } - case nebula::cpp2::SupportedType::FLOAT: - case 
nebula::cpp2::SupportedType::DOUBLE: { - return NebulaKeyUtils::encodeDouble(boost::get(v)); - } - case nebula::cpp2::SupportedType::STRING: { - return boost::get(v); - } - default: - LOG(ERROR) << "Unknown type: " - << apache::thrift::util::enumNameSafe(col.get_type().get_type()); - } - return ""; -} - -IndexValues collectIndexValues(RowReader* reader, - const std::vector& cols) { - IndexValues values; - if (reader == nullptr) { - return values; - } - for (auto& col : cols) { - auto val = indexStr(reader, col); - values.emplace_back(col.get_type().get_type(), std::move(val)); - } - return values; -} - -static std::string genEdgeIndexKey(meta::SchemaManager* schemaMan, - const std::string& prop, - GraphSpaceID spaceId, - PartitionID partId, - EdgeType type, - std::shared_ptr& index, - VertexID src, - VertexID dst) { - auto reader = RowReaderWrapper::getEdgePropReader(schemaMan, prop, spaceId, type); - auto values = collectIndexValues(reader.get(), index->get_fields()); - auto indexKey = - NebulaKeyUtils::edgeIndexKeys(partId, index->get_index_id(), src, 0, dst, values)[0]; - return indexKey; -} - -static std::string genVertexIndexKey(meta::SchemaManager* schemaMan, - const std::string& prop, - GraphSpaceID spaceId, - PartitionID partId, - TagID tagId, - std::shared_ptr& index, - VertexID vId) { - auto reader = RowReaderWrapper::getTagPropReader(schemaMan, prop, spaceId, tagId); - auto values = collectIndexValues(reader.get(), index->get_fields()); - auto indexKey = NebulaKeyUtils::vertexIndexKey(partId, index->get_index_id(), vId, values); - return indexKey; -} - -static std::shared_ptr genEdgeSchemaProvider(int32_t intFieldsNum, - int32_t stringFieldsNum) { - nebula::cpp2::Schema schema; - for (auto i = 0; i < intFieldsNum; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::INT; - schema.columns.emplace_back(std::move(column)); - } - for (auto i = intFieldsNum; i < 
intFieldsNum + stringFieldsNum; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - schema.columns.emplace_back(std::move(column)); - } - return std::make_shared(std::move(schema)); -} - -/** - * It will generate tag SchemaProvider with some int fields and string fields - * */ -static std::shared_ptr genTagSchemaProvider(TagID tagId, - int32_t intFieldsNum, - int32_t stringFieldsNum) { - nebula::cpp2::Schema schema; - for (auto i = 0; i < intFieldsNum; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("tag_%d_col_%d", tagId, i); - column.type.type = nebula::cpp2::SupportedType::INT; - schema.columns.emplace_back(std::move(column)); - } - for (auto i = intFieldsNum; i < intFieldsNum + stringFieldsNum; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("tag_%d_col_%d", tagId, i); - column.type.type = nebula::cpp2::SupportedType::STRING; - schema.columns.emplace_back(std::move(column)); - } - return std::make_shared(std::move(schema)); -} - -static std::unique_ptr mockSchemaMan(GraphSpaceID spaceId, - EdgeType edgeType, - TagID tagId) { - auto* schemaMan = new AdHocSchemaManager(); - schemaMan->addEdgeSchema( - spaceId /*space id*/, edgeType /*edge type*/, genEdgeSchemaProvider(10, 10)); - schemaMan->addTagSchema(spaceId /*space id*/, tagId, genTagSchemaProvider(tagId, 3, 3)); - std::unique_ptr sm(schemaMan); - return sm; -} - -void mockData(kvstore::KVStore* kv, - meta::SchemaManager* schemaMan, - meta::IndexManager* indexMan, - EdgeType edgeType, - TagID tagId, - GraphSpaceID spaceId) { - auto vindex = indexMan->getTagIndex(spaceId, tagId).value(); - auto eindex = indexMan->getEdgeIndex(spaceId, edgeType).value(); - for (auto partId = 0; partId < 3; partId++) { - std::vector data; - for (auto vertexId = partId * 10; vertexId < (partId + 1) * 10; vertexId++) { - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, 
tagId, 0); - RowWriter writer; - for (uint64_t numInt = 0; numInt < 3; numInt++) { - writer << (numInt + 1); - } - for (auto numString = 3; numString < 6; numString++) { - writer << folly::stringPrintf("tag_string_col_%d", numString); - } - auto val = writer.encode(); - auto indexKey = genVertexIndexKey(schemaMan, val, spaceId, partId, tagId, vindex, vertexId); - data.emplace_back(std::move(indexKey), ""); - data.emplace_back(std::move(key), std::move(val)); - - for (auto dstId = 10001; dstId <= 10007; dstId++) { - VLOG(3) << "Write part " << partId << ", vertex " << vertexId << ", dst " << dstId; - auto ekey = NebulaKeyUtils::edgeKey(partId, vertexId, edgeType, 0, dstId); - RowWriter ewriter(nullptr); - for (uint64_t numInt = 0; numInt < 10; numInt++) { - ewriter << (numInt + 1); - } - for (auto numString = 10; numString < 20; numString++) { - ewriter << folly::stringPrintf("string_col_%d", numString); - } - auto eval = ewriter.encode(); - auto edgeIndex = - genEdgeIndexKey(schemaMan, eval, spaceId, partId, edgeType, eindex, vertexId, dstId); - data.emplace_back(std::move(edgeIndex), ""); - data.emplace_back(std::move(ekey), std::move(eval)); - } - } - folly::Baton baton; - kv->asyncMultiPut(0, partId, std::move(data), [&](cpp2::ErrorCode code) { - EXPECT_EQ(code, cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); - } -} - -static std::unique_ptr mockIndexMan(GraphSpaceID spaceId = 0, - TagID tagId = 3001, - EdgeType edgeType = 101) { - auto* indexMan = new AdHocIndexManager(); - { - std::vector cols; - for (auto i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("tag_%d_col_%d", tagId, i); - column.type.type = nebula::cpp2::SupportedType::INT; - cols.emplace_back(std::move(column)); - } - for (auto i = 3; i < 6; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("tag_%d_col_%d", tagId, i); - column.type.type = nebula::cpp2::SupportedType::STRING; - 
cols.emplace_back(std::move(column)); - } - indexMan->addTagIndex(spaceId, tagId, tagId, std::move(cols)); - } - { - std::vector cols; - for (int32_t i = 0; i < 10; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::INT; - cols.emplace_back(std::move(column)); - } - for (int32_t i = 10; i < 20; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - cols.emplace_back(std::move(column)); - } - indexMan->addEdgeIndex(spaceId, edgeType, edgeType, std::move(cols)); - } - std::unique_ptr im(indexMan); - return im; -} - -static cpp2::LookUpVertexIndexResp execLookupVertices(const std::string& filter, - bool hasReturnCols = true) { - fs::TempDir rootPath("/tmp/execLookupVertices.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - TagID tagId = 3001; - EdgeType type = 101; - LOG(INFO) << "Prepare meta..."; - auto schemaMan = mockSchemaMan(spaceId, type, tagId); - auto indexMan = mockIndexMan(spaceId, tagId, type); - mockData(kv.get(), schemaMan.get(), indexMan.get(), type, tagId, spaceId); - auto* processor = - LookUpVertexIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - if (hasReturnCols) { - decltype(req.return_columns) cols; - cols.emplace_back("tag_3001_col_0"); - cols.emplace_back("tag_3001_col_1"); - cols.emplace_back("tag_3001_col_3"); - cols.emplace_back("tag_3001_col_4"); - req.set_return_columns(cols); - } - req.set_space_id(spaceId); - req.set_parts(std::move(parts)); - req.set_index_id(tagId); - - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -static cpp2::LookUpEdgeIndexResp execLookupEdges(const 
std::string& filter, - bool hasReturnCols = true) { - fs::TempDir rootPath("/tmp/execLookupEdges.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - TagID tagId = 3001; - EdgeType type = 101; - LOG(INFO) << "Prepare meta..."; - auto schemaMan = mockSchemaMan(spaceId, type, tagId); - auto indexMan = mockIndexMan(spaceId, tagId, type); - mockData(kv.get(), schemaMan.get(), indexMan.get(), type, tagId, spaceId); - auto* processor = - LookUpEdgeIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - if (hasReturnCols) { - decltype(req.return_columns) cols; - cols.emplace_back("col_0"); - cols.emplace_back("col_1"); - cols.emplace_back("col_3"); - cols.emplace_back("col_4"); - cols.emplace_back("col_10"); - cols.emplace_back("col_11"); - cols.emplace_back("col_13"); - cols.emplace_back("col_14"); - req.set_return_columns(cols); - } - req.set_space_id(spaceId); - req.set_parts(std::move(parts)); - req.set_index_id(type); - - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -static cpp2::LookUpEdgeIndexResp checkLookupEdgesString(const std::string& filter) { - fs::TempDir rootPath("/tmp/checkLookupEdgesString.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - EdgeType type = 101; - - nebula::cpp2::Schema schema; - for (auto i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - schema.columns.emplace_back(std::move(column)); - } - auto sp = std::make_shared(std::move(schema)); - auto* sm = new AdHocSchemaManager(); - sm->addEdgeSchema(spaceId /*space id*/, type /*edge type*/, sp); - std::unique_ptr schemaMan(sm); - - auto* im = new 
AdHocIndexManager(); - { - std::vector cols; - for (int32_t i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - cols.emplace_back(std::move(column)); - } - im->addEdgeIndex(spaceId, type, type, std::move(cols)); - } - std::unique_ptr indexMan(im); - auto eindex = indexMan->getEdgeIndex(spaceId, type).value(); - sleep(FLAGS_raft_heartbeat_interval_secs); - { - for (auto partId = 0; partId < 3; partId++) { - std::vector data; - { - VertexID srcId = 1; - VertexID dstId = 10; - auto eKey = NebulaKeyUtils::edgeKey(partId, srcId, type, 0, dstId); - RowWriter ewriter(nullptr); - ewriter << "AB" - << "CAB" - << "CABC"; - auto eval = ewriter.encode(); - auto edgeIndex = - genEdgeIndexKey(schemaMan.get(), eval, spaceId, partId, type, eindex, srcId, dstId); - data.emplace_back(std::move(edgeIndex), ""); - data.emplace_back(std::move(eKey), std::move(eval)); - } - { - VertexID srcId = 2; - VertexID dstId = 20; - auto eKey = NebulaKeyUtils::edgeKey(partId, srcId, type, 0, dstId); - RowWriter ewriter(nullptr); - ewriter << "ABC" - << "ABC" - << "ABC"; - auto eval = ewriter.encode(); - auto edgeIndex = - genEdgeIndexKey(schemaMan.get(), eval, spaceId, partId, type, eindex, srcId, dstId); - data.emplace_back(std::move(edgeIndex), ""); - data.emplace_back(std::move(eKey), std::move(eval)); - } - folly::Baton baton; - kv->asyncMultiPut(0, partId, std::move(data), [&](cpp2::ErrorCode code) { - EXPECT_EQ(code, cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); - } - } - auto* processor = - LookUpEdgeIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - decltype(req.return_columns) cols; - cols.emplace_back("col_0"); - cols.emplace_back("col_1"); - cols.emplace_back("col_2"); - 
req.set_space_id(spaceId); - req.set_parts(std::move(parts)); - req.set_index_id(type); - req.set_return_columns(cols); - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -static cpp2::LookUpVertexIndexResp checkLookupVerticesString(const std::string& filter) { - fs::TempDir rootPath("/tmp/checkLookupVerticesString.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - TagID tagId = 3001; - nebula::cpp2::Schema schema; - for (auto i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - schema.columns.emplace_back(std::move(column)); - } - auto sp = std::make_shared(std::move(schema)); - auto* sm = new AdHocSchemaManager(); - sm->addTagSchema(spaceId /*space id*/, tagId /*Tag ID*/, sp); - std::unique_ptr schemaMan(sm); - - auto* im = new AdHocIndexManager(); - { - std::vector cols; - for (int32_t i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::STRING; - cols.emplace_back(std::move(column)); - } - im->addTagIndex(spaceId, tagId, tagId, std::move(cols)); - } - std::unique_ptr indexMan(im); - auto vindex = indexMan->getTagIndex(spaceId, tagId).value(); - sleep(FLAGS_raft_heartbeat_interval_secs); - { - for (auto partId = 0; partId < 3; partId++) { - std::vector data; - auto version = std::numeric_limits::max() - 1; - { - VertexID vertexId = 100; - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId, version); - RowWriter twriter(nullptr); - twriter << "AB" - << "CAB" - << "CABC"; - auto tval = twriter.encode(); - auto vIndex = - genVertexIndexKey(schemaMan.get(), tval, spaceId, partId, tagId, vindex, vertexId); - data.emplace_back(std::move(vIndex), ""); - data.emplace_back(std::move(key), std::move(tval)); - } - { - VertexID vertexId 
= 200; - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId, version); - RowWriter twriter(nullptr); - twriter << "ABC" - << "ABC" - << "ABC"; - auto tval = twriter.encode(); - auto vIndex = - genVertexIndexKey(schemaMan.get(), tval, spaceId, partId, tagId, vindex, vertexId); - data.emplace_back(std::move(vIndex), ""); - data.emplace_back(std::move(key), std::move(tval)); - } - folly::Baton baton; - kv->asyncMultiPut(0, partId, std::move(data), [&](cpp2::ErrorCode code) { - EXPECT_EQ(code, cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); - } - } - auto* processor = - LookUpVertexIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - decltype(req.return_columns) cols; - cols.emplace_back("col_0"); - cols.emplace_back("col_1"); - cols.emplace_back("col_2"); - req.set_space_id(spaceId); - req.set_parts(std::move(parts)); - req.set_index_id(tagId); - req.set_return_columns(cols); - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -static cpp2::LookUpEdgeIndexResp checkLookupEdgesDouble(const std::string& filter) { - fs::TempDir rootPath("/tmp/checkLookupEdgesDouble.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - EdgeType type = 101; - - nebula::cpp2::Schema schema; - for (auto i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::DOUBLE; - schema.columns.emplace_back(std::move(column)); - } - auto sp = std::make_shared(std::move(schema)); - auto* sm = new AdHocSchemaManager(); - sm->addEdgeSchema(spaceId /*space id*/, type /*edge type*/, sp); - std::unique_ptr schemaMan(sm); - - auto* im = new AdHocIndexManager(); - { - std::vector cols; - for (int32_t i = 0; i < 
3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::DOUBLE; - cols.emplace_back(std::move(column)); - } - im->addEdgeIndex(spaceId, type, type, std::move(cols)); - } - std::unique_ptr indexMan(im); - auto eindex = indexMan->getEdgeIndex(spaceId, type).value(); - sleep(FLAGS_raft_heartbeat_interval_secs); - { - for (auto partId = 0; partId < 3; partId++) { - std::vector data; - { - VertexID srcId = 1; - VertexID dstId = 10; - auto eKey = NebulaKeyUtils::edgeKey(partId, srcId, type, 0, dstId); - RowWriter ewriter(nullptr); - ewriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto eval = ewriter.encode(); - auto edgeIndex = - genEdgeIndexKey(schemaMan.get(), eval, spaceId, partId, type, eindex, srcId, dstId); - data.emplace_back(std::move(edgeIndex), ""); - data.emplace_back(std::move(eKey), std::move(eval)); - } - { - VertexID srcId = 2; - VertexID dstId = 20; - auto eKey = NebulaKeyUtils::edgeKey(partId, srcId, type, 0, dstId); - RowWriter ewriter(nullptr); - ewriter << boost::get(2.2) << boost::get(0.0) << boost::get(-2.2); - auto eval = ewriter.encode(); - auto edgeIndex = - genEdgeIndexKey(schemaMan.get(), eval, spaceId, partId, type, eindex, srcId, dstId); - data.emplace_back(std::move(edgeIndex), ""); - data.emplace_back(std::move(eKey), std::move(eval)); - } - folly::Baton baton; - kv->asyncMultiPut(0, partId, std::move(data), [&](cpp2::ErrorCode code) { - EXPECT_EQ(code, cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); - } - } - auto* processor = - LookUpEdgeIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - decltype(req.return_columns) cols; - cols.emplace_back("col_0"); - cols.emplace_back("col_1"); - cols.emplace_back("col_2"); - req.set_space_id(spaceId); - 
req.set_parts(std::move(parts)); - req.set_index_id(type); - req.set_return_columns(cols); - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -static cpp2::LookUpVertexIndexResp checkLookupVerticesDouble(const std::string& filter) { - fs::TempDir rootPath("/tmp/checkLookupVerticesDouble.XXXXXX"); - std::unique_ptr kv = TestUtils::initKV(rootPath.path()); - GraphSpaceID spaceId = 0; - TagID tagId = 3001; - nebula::cpp2::Schema schema; - for (auto i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::DOUBLE; - schema.columns.emplace_back(std::move(column)); - } - auto sp = std::make_shared(std::move(schema)); - auto* sm = new AdHocSchemaManager(); - sm->addTagSchema(spaceId /*space id*/, tagId /*Tag ID*/, sp); - std::unique_ptr schemaMan(sm); - - auto* im = new AdHocIndexManager(); - { - std::vector cols; - for (int32_t i = 0; i < 3; i++) { - nebula::cpp2::ColumnDef column; - column.name = folly::stringPrintf("col_%d", i); - column.type.type = nebula::cpp2::SupportedType::DOUBLE; - cols.emplace_back(std::move(column)); - } - im->addTagIndex(spaceId, tagId, tagId, std::move(cols)); - } - std::unique_ptr indexMan(im); - auto vindex = indexMan->getTagIndex(spaceId, tagId).value(); - sleep(FLAGS_raft_heartbeat_interval_secs); - { - for (auto partId = 0; partId < 3; partId++) { - std::vector data; - auto version = std::numeric_limits::max() - 1; - { - VertexID vertexId = 100; - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId, version); - RowWriter twriter(nullptr); - twriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto tval = twriter.encode(); - auto vIndex = - genVertexIndexKey(schemaMan.get(), tval, spaceId, partId, tagId, vindex, vertexId); - data.emplace_back(std::move(vIndex), ""); - data.emplace_back(std::move(key), std::move(tval)); - } - { - VertexID vertexId = 
200; - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId, version); - RowWriter twriter(nullptr); - twriter << boost::get(2.2) << boost::get(0.0) << boost::get(-2.2); - auto tval = twriter.encode(); - auto vIndex = - genVertexIndexKey(schemaMan.get(), tval, spaceId, partId, tagId, vindex, vertexId); - data.emplace_back(std::move(vIndex), ""); - data.emplace_back(std::move(key), std::move(tval)); - } - folly::Baton baton; - kv->asyncMultiPut(0, partId, std::move(data), [&](cpp2::ErrorCode code) { - EXPECT_EQ(code, cpp2::ErrorCode::SUCCEEDED); - baton.post(); - }); - baton.wait(); - } - } - auto* processor = - LookUpVertexIndexProcessor::instance(kv.get(), schemaMan.get(), indexMan.get(), nullptr); - cpp2::LookUpIndexRequest req; - decltype(req.parts) parts; - parts.emplace_back(0); - parts.emplace_back(1); - parts.emplace_back(2); - decltype(req.return_columns) cols; - cols.emplace_back("col_0"); - cols.emplace_back("col_1"); - cols.emplace_back("col_2"); - req.set_space_id(spaceId); - req.set_parts(std::move(parts)); - req.set_index_id(tagId); - req.set_return_columns(cols); - req.set_filter(filter); - auto f = processor->getFuture(); - processor->process(req); - return std::move(f).get(); -} - -TEST(IndexScanTest, SimpleScanTest) { - { - LOG(INFO) << "Build filter..."; - auto* prop = new std::string("tag_3001_col_0"); - auto* alias = new std::string("3001"); - auto* aliaExp = new AliasPropertyExpression(new std::string(""), alias, prop); - auto* priExp = new PrimaryExpression(1L); - auto relExp = - std::make_unique(aliaExp, RelationalExpression::Operator::EQ, priExp); - auto resp = execLookupVertices(Expression::encode(relExp.get())); - - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(4, resp.get_schema()->get_columns().size()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - auto* prop = new std::string("col_0"); - auto* alias = new std::string("101"); - auto* aliaExp = new AliasPropertyExpression(new 
std::string(""), alias, prop); - auto* priExp = new PrimaryExpression(1L); - auto relExp = - std::make_unique(aliaExp, RelationalExpression::Operator::EQ, priExp); - auto resp = execLookupEdges(Expression::encode(relExp.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(8, resp.get_schema()->get_columns().size()); - EXPECT_EQ(210, resp.rows.size()); - } -} - -TEST(IndexScanTest, AccurateScanTest) { - { - LOG(INFO) << "Build filter..."; - /** - * where tag_3001_col_0 == 1 and - * tag_3001_col_1 == 2 and - * tag_3001_col_2 == 3 - */ - auto* col0 = new std::string("tag_3001_col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("tag_3001_col_1"); - auto* alias1 = new std::string("3001"); - auto* ape1 = new AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(2L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::EQ, pe1); - - auto* col2 = new std::string("tag_3001_col_2"); - auto* alias2 = new std::string("3001"); - auto* ape2 = new AliasPropertyExpression(new std::string(""), alias2, col2); - auto* pe2 = new PrimaryExpression(3L); - auto* r3 = new RelationalExpression(ape2, RelationalExpression::Operator::EQ, pe2); - auto* le1 = new LogicalExpression(r1, LogicalExpression::AND, r2); - - auto logExp = std::make_unique(le1, LogicalExpression::AND, r3); - auto resp = execLookupVertices(Expression::encode(logExp.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(4, resp.get_schema()->get_columns().size()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where col_0 == 1 and - * col_1 == 2 and - * col_2 == 3 - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - 
auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("col_1"); - auto* alias1 = new std::string("101"); - auto* ape1 = new AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(2L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::EQ, pe1); - - auto* col2 = new std::string("col_2"); - auto* alias2 = new std::string("101"); - auto* ape2 = new AliasPropertyExpression(new std::string(""), alias2, col2); - auto* pe2 = new PrimaryExpression(3L); - auto* r3 = new RelationalExpression(ape2, RelationalExpression::Operator::EQ, pe2); - auto* le1 = new LogicalExpression(r1, LogicalExpression::AND, r2); - - auto logExp = std::make_unique(le1, LogicalExpression::AND, r3); - auto resp = execLookupEdges(Expression::encode(logExp.get())); - - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(8, resp.get_schema()->get_columns().size()); - EXPECT_EQ(210, resp.rows.size()); - } -} -TEST(IndexScanTest, SeekScanTest) { - { - LOG(INFO) << "Build filter..."; - /** - * where tag_3001_col_0 > 0 - */ - auto* col0 = new std::string("tag_3001_col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(0L); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = execLookupVertices(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(4, resp.get_schema()->get_columns().size()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where col_0 > 0 - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new 
PrimaryExpression(0L); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = execLookupEdges(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(8, resp.get_schema()->get_columns().size()); - EXPECT_EQ(210, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where tag_3001_col_1 > 0 - */ - auto* col0 = new std::string("tag_3001_col_1"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(0L); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = execLookupVertices(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(4, resp.get_schema()->get_columns().size()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where col_1 > 0 - */ - auto* col0 = new std::string("col_1"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(0L); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = execLookupEdges(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(8, resp.get_schema()->get_columns().size()); - EXPECT_EQ(210, resp.rows.size()); - } -} - -TEST(IndexScanTest, PrefixScanTest) { - { - LOG(INFO) << "Build filter..."; - /** - * where tag_3001_col_0 == 1 and - * tag_3001_col_1 > 1 - */ - auto* col0 = new std::string("tag_3001_col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("tag_3001_col_1"); - auto* alias1 = new std::string("3001"); - auto* ape1 = new 
AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(1L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::GT, pe1); - - auto logExp = std::make_unique(r1, LogicalExpression::AND, r2); - auto resp = execLookupVertices(Expression::encode(logExp.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(4, resp.get_schema()->get_columns().size()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where col_0 == 1 and - * col_1 > 1 - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("col_1"); - auto* alias1 = new std::string("101"); - auto* ape1 = new AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(1L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::GT, pe1); - - auto logExp = std::make_unique(r1, LogicalExpression::AND, r2); - auto resp = execLookupEdges(Expression::encode(logExp.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(8, resp.get_schema()->get_columns().size()); - EXPECT_EQ(210, resp.rows.size()); - } -} - -TEST(IndexScanTest, NoReturnColumnsTest) { - { - LOG(INFO) << "Build filter..."; - /** - * where tag_3001_col_0 == 1 and - * tag_3001_col_1 > 1 - */ - auto* col0 = new std::string("tag_3001_col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("tag_3001_col_1"); - auto* alias1 = new std::string("3001"); - auto* ape1 = new 
AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(1L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::GT, pe1); - - auto logExp = std::make_unique(r1, LogicalExpression::AND, r2); - auto resp = execLookupVertices(Expression::encode(logExp.get()), false); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(NULL, resp.get_schema()); - EXPECT_EQ(30, resp.rows.size()); - } - { - LOG(INFO) << "Build filter..."; - /** - * where col_0 == 1 and - * col_1 > 1 - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - auto* pe0 = new PrimaryExpression(1L); - auto* r1 = new RelationalExpression(ape0, RelationalExpression::Operator::EQ, pe0); - - auto* col1 = new std::string("col_1"); - auto* alias1 = new std::string("101"); - auto* ape1 = new AliasPropertyExpression(new std::string(""), alias1, col1); - auto* pe1 = new PrimaryExpression(1L); - auto* r2 = new RelationalExpression(ape1, RelationalExpression::Operator::GT, pe1); - - auto logExp = std::make_unique(r1, LogicalExpression::AND, r2); - auto resp = execLookupEdges(Expression::encode(logExp.get()), false); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(NULL, resp.get_schema()); - EXPECT_EQ(210, resp.rows.size()); - } -} - -TEST(IndexScanTest, EdgeStringTest) { - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_0 == "ABC" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c0("ABC"); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesString(Expression::encode(r1.get())); - EXPECT_EQ(0, 
resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "ABC" - << "ABC" - << "ABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(2, row.get_key().get_src()); - EXPECT_EQ(20, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_0 == "AB" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c0("AB"); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesString(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "AB" - << "CAB" - << "CABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(1, row.get_key().get_src()); - EXPECT_EQ(10, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_1 == "CAB" - */ - auto* col0 = new std::string("col_1"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c1("CAB"); - auto* pe0 = new PrimaryExpression(c1); - auto r1 = std::make_unique(ape0, 
RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesString(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "AB" - << "CAB" - << "CABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(1, row.get_key().get_src()); - EXPECT_EQ(10, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } -} - -TEST(IndexScanTest, VertexStringTest) { - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_0 == "ABC" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c0("ABC"); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesString(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "ABC" - << "ABC" - << "ABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(200, row.get_vertex_id()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_0 == "AB" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c0("AB"); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = 
std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesString(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "AB" - << "CAB" - << "CABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(100, row.get_vertex_id()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "AB" << "CAB" << "CABC" - * "ABC" << "ABC" << "ABC" - * - * where col_1 == "CAB" - */ - auto* col0 = new std::string("col_1"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - std::string c1("CAB"); - auto* pe0 = new PrimaryExpression(c1); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesString(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << "AB" - << "CAB" - << "CABC"; - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(100, row.get_vertex_id()); - } - } -} - -TEST(IndexScanTest, EdgeDoubleTest) { - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_0 == "1.1" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - double c0(1.1); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesDouble(Expression::encode(r1.get())); 
- EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(1, row.get_key().get_src()); - EXPECT_EQ(10, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_2 == "-1.1" - */ - auto* col2 = new std::string("col_2"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col2); - double c2(-1.1); - auto* pe0 = new PrimaryExpression(c2); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(1, row.get_key().get_src()); - EXPECT_EQ(10, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_0 > "1.1" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - double c0(1.1); - auto* pe0 = new PrimaryExpression(c0); - 
auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = checkLookupEdgesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << boost::get(2.2) << boost::get(0.0) << boost::get(-2.2); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(2, row.get_key().get_src()); - EXPECT_EQ(20, row.get_key().get_dst()); - EXPECT_EQ(101, row.get_key().get_edge_type()); - EXPECT_EQ(0, row.get_key().get_ranking()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_1 == "0.0" - */ - auto* col1 = new std::string("col_1"); - auto* alias0 = new std::string("101"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col1); - double c1(0.0); - auto* pe0 = new PrimaryExpression(c1); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupEdgesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(6, resp.rows.size()); - } -} - -TEST(IndexScanTest, VertexDoubleTest) { - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_0 == "1.1" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - double c0(1.1); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - 
EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(100, row.get_vertex_id()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_2 == "-1.1" - */ - auto* col2 = new std::string("col_2"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col2); - double c2(-1.1); - auto* pe0 = new PrimaryExpression(c2); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - ewriter << boost::get(1.1) << boost::get(0.0) << boost::get(-1.1); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(100, row.get_vertex_id()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_0 > "1.1" - */ - auto* col0 = new std::string("col_0"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col0); - double c0(1.1); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::GT, pe0); - auto resp = checkLookupVerticesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(3, resp.rows.size()); - RowWriter ewriter(nullptr); - 
ewriter << boost::get(2.2) << boost::get(0.0) << boost::get(-2.2); - auto eval = ewriter.encode(); - for (const auto& row : resp.rows) { - EXPECT_EQ(eval, row.get_props()); - EXPECT_EQ(200, row.get_vertex_id()); - } - } - { - LOG(INFO) << "Build filter..."; - /** - * Total rows : - * col_0 col_1 col_2 - * "1.1" << "0.0" << "-1.1" - * "2.2" << "0.0" << "-2.2" - * - * where col_1 == "0.0" - */ - auto* col1 = new std::string("col_1"); - auto* alias0 = new std::string("3001"); - auto* ape0 = new AliasPropertyExpression(new std::string(""), alias0, col1); - double c0(0.0); - auto* pe0 = new PrimaryExpression(c0); - auto r1 = std::make_unique(ape0, RelationalExpression::Operator::EQ, pe0); - auto resp = checkLookupVerticesDouble(Expression::encode(r1.get())); - EXPECT_EQ(0, resp.result.failed_codes.size()); - EXPECT_EQ(3, resp.get_schema()->get_columns().size()); - EXPECT_EQ(6, resp.rows.size()); - } -} - -} // namespace storage -} // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - return RUN_ALL_TESTS(); -} From 4f832fb0148693e7584a3230cffc98417c8548ff Mon Sep 17 00:00:00 2001 From: George <58841610+Shinji-IkariG@users.noreply.github.com> Date: Thu, 11 Nov 2021 10:24:07 +0800 Subject: [PATCH 08/53] Update the runner associated with the workflow (#3292) * Update nightly.yml update runner label * Update pull_request.yml update runner label * Update release.yml update runner label Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .github/workflows/nightly.yml | 6 +++--- .github/workflows/pull_request.yml | 2 +- .github/workflows/release.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 947a55d1a75..d02a39a7b08 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -15,7 +15,7 @@ defaults: jobs: package: name: build 
package - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: @@ -58,7 +58,7 @@ jobs: docker: name: build docker image - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: @@ -89,7 +89,7 @@ jobs: coverage: name: coverage - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 55dfa57a1c4..73b00e144f0 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -45,7 +45,7 @@ jobs: build: name: build needs: lint - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fd748970dea..1e9e0893cc0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ defaults: jobs: package: name: build package - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: @@ -66,7 +66,7 @@ jobs: docker_build: name: docker-build - runs-on: self-hosted + runs-on: [self-hosted, nebula] strategy: fail-fast: false matrix: From 1d0c583c992752af158f11beb6f94312c5440392 Mon Sep 17 00:00:00 2001 From: Sophie-Xie <84560950+Sophie-Xie@users.noreply.github.com> Date: Thu, 11 Nov 2021 11:59:30 +0800 Subject: [PATCH 09/53] Delete .github/PULL_REQUEST_TEMPLATE directory (#3241) Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .../pull_request_template.md | 51 ------------------- 1 file changed, 51 deletions(-) delete mode 100644 .github/PULL_REQUEST_TEMPLATE/pull_request_template.md diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md deleted file mode 100644 index 3bfce28e306..00000000000 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +++ /dev/null @@ -1,51 +0,0 @@ - - -### What changes 
were proposed in this pull request? - - - -### Why are the changes needed? - - -### Will break the compatibility? How if so? - - -### Does this PR introduce any user-facing change? - - - -### How was this patch tested? - - -### Checklist - - -- [ ] I've run the tests to see all new and existing tests pass -- [ ] If this Pull Request resolves an issue, I linked to the issue in the text above -- [ ] I've informed the technical writer about the documentation change if necessary From 8e0028f306fedffb68f3f2c45760abdec0d2d67c Mon Sep 17 00:00:00 2001 From: Shylock Hg <33566796+Shylock-Hg@users.noreply.github.com> Date: Thu, 11 Nov 2021 16:38:34 +0800 Subject: [PATCH 10/53] Feature/scan multiple parts (#3262) * Scan multiple parts. * Add multiple parts test case. * Add limit test. * Remove unused include. * Support multiple tags. * Fix license header. * Optimize the extra read operations. * Fix compile error. * Skip invalid tag in one loop. * Avoid extra logical. --- src/clients/storage/GraphStorageClient.cpp | 80 +++-- src/clients/storage/GraphStorageClient.h | 13 +- src/clients/storage/StorageClientBase-inl.h | 23 ++ src/clients/storage/StorageClientBase.h | 11 +- src/interface/storage.thrift | 58 ++-- src/storage/exec/EdgeNode.h | 21 +- src/storage/exec/RelNode.h | 2 + src/storage/exec/ScanNode.h | 274 +++++++++++++++++ src/storage/exec/TagNode.h | 21 +- src/storage/query/ScanEdgeProcessor.cpp | 169 +++++++---- src/storage/query/ScanEdgeProcessor.h | 25 +- src/storage/query/ScanVertexProcessor.cpp | 167 +++++++---- src/storage/query/ScanVertexProcessor.h | 25 +- src/storage/test/ScanEdgeTest.cpp | 124 +++++++- src/storage/test/ScanVertexTest.cpp | 315 +++++++++++++++++--- 15 files changed, 1089 insertions(+), 239 deletions(-) create mode 100644 src/storage/exec/ScanNode.h diff --git a/src/clients/storage/GraphStorageClient.cpp b/src/clients/storage/GraphStorageClient.cpp index 1c5cb30a589..206502e10e8 100644 --- a/src/clients/storage/GraphStorageClient.cpp +++ 
b/src/clients/storage/GraphStorageClient.cpp @@ -559,36 +559,68 @@ StorageRpcRespFuture GraphStorageClient::lookupAndTr }); } -folly::Future> GraphStorageClient::scanEdge( - cpp2::ScanEdgeRequest req, folly::EventBase* evb) { - std::pair request; - auto host = this->getLeader(req.get_space_id(), req.get_part_id()); - if (!host.ok()) { - return folly::makeFuture>(host.status()); +StorageRpcRespFuture GraphStorageClient::scanEdge( + const CommonRequestParam& param, + const cpp2::EdgeProp& edgeProp, + int64_t limit, + const Expression* filter) { + std::unordered_map requests; + auto status = getHostPartsWithCursor(param.space); + if (!status.ok()) { + return folly::makeFuture>( + std::runtime_error(status.status().toString())); + } + auto& clusters = status.value(); + for (const auto& c : clusters) { + auto& host = c.first; + auto& req = requests[host]; + req.set_space_id(param.space); + req.set_parts(std::move(c.second)); + req.set_return_columns(edgeProp); + req.set_limit(limit); + if (filter != nullptr) { + req.set_filter(filter->encode()); + } + req.set_common(param.toReqCommon()); } - request.first = std::move(host).value(); - request.second = std::move(req); - return getResponse(evb, - std::move(request), - [](cpp2::GraphStorageServiceAsyncClient* client, - const cpp2::ScanEdgeRequest& r) { return client->future_scanEdge(r); }); + return collectResponse(param.evb, + std::move(requests), + [](cpp2::GraphStorageServiceAsyncClient* client, + const cpp2::ScanEdgeRequest& r) { return client->future_scanEdge(r); }); } -folly::Future> GraphStorageClient::scanVertex( - cpp2::ScanVertexRequest req, folly::EventBase* evb) { - std::pair request; - auto host = this->getLeader(req.get_space_id(), req.get_part_id()); - if (!host.ok()) { - return folly::makeFuture>(host.status()); +StorageRpcRespFuture GraphStorageClient::scanVertex( + const CommonRequestParam& param, + const std::vector& vertexProp, + int64_t limit, + const Expression* filter) { + std::unordered_map requests; 
+ auto status = getHostPartsWithCursor(param.space); + if (!status.ok()) { + return folly::makeFuture>( + std::runtime_error(status.status().toString())); + } + auto& clusters = status.value(); + for (const auto& c : clusters) { + auto& host = c.first; + auto& req = requests[host]; + req.set_space_id(param.space); + req.set_parts(std::move(c.second)); + req.set_return_columns(vertexProp); + req.set_limit(limit); + if (filter != nullptr) { + req.set_filter(filter->encode()); + } + req.set_common(param.toReqCommon()); } - request.first = std::move(host).value(); - request.second = std::move(req); - return getResponse(evb, - std::move(request), - [](cpp2::GraphStorageServiceAsyncClient* client, - const cpp2::ScanVertexRequest& r) { return client->future_scanVertex(r); }); + return collectResponse( + param.evb, + std::move(requests), + [](cpp2::GraphStorageServiceAsyncClient* client, const cpp2::ScanVertexRequest& r) { + return client->future_scanVertex(r); + }); } StatusOr> GraphStorageClient::getIdFromRow( diff --git a/src/clients/storage/GraphStorageClient.h b/src/clients/storage/GraphStorageClient.h index 9ce6258e899..9b917b36add 100644 --- a/src/clients/storage/GraphStorageClient.h +++ b/src/clients/storage/GraphStorageClient.h @@ -130,11 +130,16 @@ class GraphStorageClient : public StorageClientBase lookupAndTraverse( const CommonRequestParam& param, cpp2::IndexSpec indexSpec, cpp2::TraverseSpec traverseSpec); - folly::Future> scanEdge(cpp2::ScanEdgeRequest req, - folly::EventBase* evb = nullptr); + StorageRpcRespFuture scanEdge(const CommonRequestParam& param, + const cpp2::EdgeProp& vertexProp, + int64_t limit, + const Expression* filter); - folly::Future> scanVertex(cpp2::ScanVertexRequest req, - folly::EventBase* evb = nullptr); + StorageRpcRespFuture scanVertex( + const CommonRequestParam& param, + const std::vector& vertexProp, + int64_t limit, + const Expression* filter); private: StatusOr> getIdFromRow(GraphSpaceID space, diff --git 
a/src/clients/storage/StorageClientBase-inl.h b/src/clients/storage/StorageClientBase-inl.h index ddb69a53232..6a3f7ac9651 100644 --- a/src/clients/storage/StorageClientBase-inl.h +++ b/src/clients/storage/StorageClientBase-inl.h @@ -329,5 +329,28 @@ StorageClientBase::getHostParts(GraphSpaceID spaceId) const { return hostParts; } +template +StatusOr>> +StorageClientBase::getHostPartsWithCursor(GraphSpaceID spaceId) const { + std::unordered_map> hostParts; + auto status = metaClient_->partsNum(spaceId); + if (!status.ok()) { + return Status::Error("Space not found, spaceid: %d", spaceId); + } + + // TODO support cursor + cpp2::ScanCursor c; + c.set_has_next(false); + auto parts = status.value(); + for (auto partId = 1; partId <= parts; partId++) { + auto leader = getLeader(spaceId, partId); + if (!leader.ok()) { + return leader.status(); + } + hostParts[leader.value()].emplace(partId, c); + } + return hostParts; +} + } // namespace storage } // namespace nebula diff --git a/src/clients/storage/StorageClientBase.h b/src/clients/storage/StorageClientBase.h index 900e4001e22..a9d347d10ed 100644 --- a/src/clients/storage/StorageClientBase.h +++ b/src/clients/storage/StorageClientBase.h @@ -166,6 +166,9 @@ class StorageClientBase { std::unordered_map>>> clusterIdsToHosts(GraphSpaceID spaceId, const Container& ids, GetIdFunc f) const; + StatusOr>> + getHostPartsWithCursor(GraphSpaceID spaceId) const; + virtual StatusOr getPartHosts(GraphSpaceID spaceId, PartitionID partId) const { CHECK(metaClient_ != nullptr); return metaClient_->getPartHostsFromCache(spaceId, partId); @@ -208,14 +211,6 @@ class StorageClientBase { return {req.get_part_id()}; } - std::vector getReqPartsId(const cpp2::ScanEdgeRequest& req) const { - return {req.get_part_id()}; - } - - std::vector getReqPartsId(const cpp2::ScanVertexRequest& req) const { - return {req.get_part_id()}; - } - bool isValidHostPtr(const HostAddr* addr) { return addr != nullptr && !addr->host.empty() && addr->port != 0; } diff 
--git a/src/interface/storage.thrift b/src/interface/storage.thrift index be82a0ca2e3..e4ff187305c 100644 --- a/src/interface/storage.thrift +++ b/src/interface/storage.thrift @@ -560,24 +560,29 @@ struct LookupAndTraverseRequest { * End of Index section */ +struct ScanCursor { + 3: bool has_next, + // next start key of scan, only valid when has_next is true + 4: optional binary next_cursor, +} + struct ScanVertexRequest { 1: common.GraphSpaceID space_id, - 2: common.PartitionID part_id, - // start key of this block - 3: optional binary cursor, - 4: VertexProp return_columns, + 2: map (cpp.template = "std::unordered_map") + parts, + 3: list return_columns, // max row count of tag in this response - 5: i64 limit, + 4: i64 limit, // only return data in time range [start_time, end_time) - 6: optional i64 start_time, - 7: optional i64 end_time, - 8: optional binary filter, + 5: optional i64 start_time, + 6: optional i64 end_time, + 7: optional binary filter, // when storage enable multi versions and only_latest_version is true, only return latest version. // when storage disable multi versions, just use the default value. - 9: bool only_latest_version = false, + 8: bool only_latest_version = false, // if set to false, forbid follower read - 10: bool enable_read_from_follower = true, - 11: optional RequestCommon common, + 9: bool enable_read_from_follower = true, + 10: optional RequestCommon common, } struct ScanVertexResponse { @@ -586,29 +591,27 @@ struct ScanVertexResponse { // Each column represents one property. the column name is in the form of "tag_name.prop_alias" // in the same order which specified in VertexProp in request. 
2: common.DataSet vertex_data, - 3: bool has_next, - // next start key of scan, only valid when has_next is true - 4: optional binary next_cursor, + 3: map (cpp.template = "std::unordered_map") + cursors; } struct ScanEdgeRequest { 1: common.GraphSpaceID space_id, - 2: common.PartitionID part_id, - // start key of this block - 3: optional binary cursor, - 4: EdgeProp return_columns, + 2: map (cpp.template = "std::unordered_map") + parts, + 3: EdgeProp return_columns, // max row count of edge in this response - 5: i64 limit, + 4: i64 limit, // only return data in time range [start_time, end_time) - 6: optional i64 start_time, - 7: optional i64 end_time, - 8: optional binary filter, + 5: optional i64 start_time, + 6: optional i64 end_time, + 7: optional binary filter, // when storage enable multi versions and only_latest_version is true, only return latest version. // when storage disable multi versions, just use the default value. - 9: bool only_latest_version = false, + 8: bool only_latest_version = false, // if set to false, forbid follower read - 10: bool enable_read_from_follower = true, - 11: optional RequestCommon common, + 9: bool enable_read_from_follower = true, + 10: optional RequestCommon common, } struct ScanEdgeResponse { @@ -617,9 +620,8 @@ struct ScanEdgeResponse { // Each column represents one property. the column name is in the form of "edge_name.prop_alias" // in the same order which specified in EdgeProp in requesss. 
2: common.DataSet edge_data, - 3: bool has_next, - // next start key of scan, only valid when has_next is true - 4: optional binary next_cursor, + 3: map (cpp.template = "std::unordered_map") + cursors; } struct TaskPara { diff --git a/src/storage/exec/EdgeNode.h b/src/storage/exec/EdgeNode.h index 988923f5f44..beee24665f3 100644 --- a/src/storage/exec/EdgeNode.h +++ b/src/storage/exec/EdgeNode.h @@ -26,7 +26,9 @@ class EdgeNode : public IterateNode { return valueHandler(this->key(), this->reader(), props_); } - const std::string& getEdgeName() { return edgeName_; } + const std::string& getEdgeName() const { return edgeName_; } + + EdgeType edgeType() const { return edgeType_; } protected: EdgeNode(RuntimeContext* context, @@ -113,8 +115,7 @@ class FetchEdgeNode final : public EdgeNode { (*edgeKey.dst_ref()).getStr()); ret = context_->env()->kvstore_->get(context_->spaceId(), partId, key_, &val_); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { - resetReader(); - return nebula::cpp2::ErrorCode::SUCCEEDED; + return doExecute(key_, val_); } else if (ret == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { // regard key not found as succeed as well, upper node will handle it return nebula::cpp2::ErrorCode::SUCCEEDED; @@ -122,6 +123,20 @@ class FetchEdgeNode final : public EdgeNode { return ret; } + nebula::cpp2::ErrorCode doExecute(const std::string& key, const std::string& value) { + key_ = key; + val_ = value; + resetReader(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + + void clear() { + valid_ = false; + key_.clear(); + val_.clear(); + reader_.reset(); + } + private: void resetReader() { reader_.reset(*schemas_, val_); diff --git a/src/storage/exec/RelNode.h b/src/storage/exec/RelNode.h index 1588dbff831..8bc0f2ff39f 100644 --- a/src/storage/exec/RelNode.h +++ b/src/storage/exec/RelNode.h @@ -77,6 +77,8 @@ class RelNode { explicit RelNode(const std::string& name) : name_(name) {} + const std::string& name() const { return name_; } + std::string name_ = 
"RelNode"; std::vector*> dependencies_; bool hasDependents_ = false; diff --git a/src/storage/exec/ScanNode.h b/src/storage/exec/ScanNode.h new file mode 100644 index 00000000000..3778eb87804 --- /dev/null +++ b/src/storage/exec/ScanNode.h @@ -0,0 +1,274 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#pragma once + +#include "common/base/Base.h" +#include "storage/exec/GetPropNode.h" + +namespace nebula { +namespace storage { + +using Cursor = std::string; + +// Node to scan vertices of one partition +class ScanVertexPropNode : public QueryNode { + public: + using RelNode::doExecute; + + explicit ScanVertexPropNode(RuntimeContext* context, + std::vector> tagNodes, + bool enableReadFollower, + int64_t limit, + std::unordered_map* cursors, + nebula::DataSet* resultDataSet) + : context_(context), + tagNodes_(std::move(tagNodes)), + enableReadFollower_(enableReadFollower), + limit_(limit), + cursors_(cursors), + resultDataSet_(resultDataSet) { + name_ = "ScanVertexPropNode"; + for (std::size_t i = 0; i < tagNodes_.size(); ++i) { + tagNodesIndex_.emplace(tagNodes_[i]->tagId(), i); + } + } + + nebula::cpp2::ErrorCode doExecute(PartitionID partId, const Cursor& cursor) override { + auto ret = RelNode::doExecute(partId); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + return ret; + } + + std::string start; + std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + if (cursor.empty()) { + start = prefix; + } else { + start = cursor; + } + + std::unique_ptr iter; + auto kvRet = context_->env()->kvstore_->rangeWithPrefix( + context_->planContext_->spaceId_, partId, start, prefix, &iter, enableReadFollower_); + if (kvRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + return kvRet; + } + + const auto rowLimit = limit_; + auto vIdLen = context_->vIdLen(); + auto isIntId = context_->isIntId(); + std::string currentVertexId; + for (; iter->valid() && static_cast(resultDataSet_->rowSize()) < 
rowLimit; + iter->next()) { + auto key = iter->key(); + auto tagId = NebulaKeyUtils::getTagId(vIdLen, key); + auto tagIdIndex = tagNodesIndex_.find(tagId); + if (tagIdIndex == tagNodesIndex_.end()) { + continue; + } + auto vertexId = NebulaKeyUtils::getVertexId(vIdLen, key); + if (vertexId != currentVertexId && !currentVertexId.empty()) { + collectOneRow(isIntId, vIdLen, currentVertexId); + } // collect vertex row + currentVertexId = vertexId; + if (static_cast(resultDataSet_->rowSize()) >= rowLimit) { + break; + } + auto value = iter->val(); + tagNodes_[tagIdIndex->second]->doExecute(key.toString(), value.toString()); + } // iterate key + if (static_cast(resultDataSet_->rowSize()) < rowLimit) { + collectOneRow(isIntId, vIdLen, currentVertexId); + } + + cpp2::ScanCursor c; + if (iter->valid()) { + c.set_has_next(true); + c.set_next_cursor(iter->key().str()); + } else { + c.set_has_next(false); + } + cursors_->emplace(partId, std::move(c)); + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + + void collectOneRow(bool isIntId, std::size_t vIdLen, const std::string& currentVertexId) { + List row; + nebula::cpp2::ErrorCode ret = nebula::cpp2::ErrorCode::SUCCEEDED; + // vertexId is the first column + if (isIntId) { + row.emplace_back(*reinterpret_cast(currentVertexId.data())); + } else { + row.emplace_back(currentVertexId.c_str()); + } + // if none of the tag node valid, do not emplace the row + if (std::any_of(tagNodes_.begin(), tagNodes_.end(), [](const auto& tagNode) { + return tagNode->valid(); + })) { + for (auto& tagNode : tagNodes_) { + ret = tagNode->collectTagPropsIfValid( + [&row](const std::vector* props) -> nebula::cpp2::ErrorCode { + for (const auto& prop : *props) { + if (prop.returned_) { + row.emplace_back(Value()); + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; + }, + [&row, vIdLen, isIntId]( + folly::StringPiece key, + RowReader* reader, + const std::vector* props) -> nebula::cpp2::ErrorCode { + if (!QueryUtils::collectVertexProps(key, vIdLen, 
isIntId, reader, props, row).ok()) { + return nebula::cpp2::ErrorCode::E_TAG_PROP_NOT_FOUND; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + break; + } + } + if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { + resultDataSet_->rows.emplace_back(std::move(row)); + } + for (auto& tagNode : tagNodes_) { + tagNode->clear(); + } + } + } + + private: + RuntimeContext* context_; + std::vector> tagNodes_; + std::unordered_map tagNodesIndex_; + bool enableReadFollower_; + int64_t limit_; + // cursors for next scan + std::unordered_map* cursors_; + nebula::DataSet* resultDataSet_; +}; + +// Node to scan edge of one partition +class ScanEdgePropNode : public QueryNode { + public: + using RelNode::doExecute; + + ScanEdgePropNode(RuntimeContext* context, + std::vector> edgeNodes, + bool enableReadFollower, + int64_t limit, + std::unordered_map* cursors, + nebula::DataSet* resultDataSet) + : context_(context), + edgeNodes_(std::move(edgeNodes)), + enableReadFollower_(enableReadFollower), + limit_(limit), + cursors_(cursors), + resultDataSet_(resultDataSet) { + QueryNode::name_ = "ScanEdgePropNode"; + for (std::size_t i = 0; i < edgeNodes_.size(); ++i) { + edgeNodesIndex_.emplace(edgeNodes_[i]->edgeType(), i); + } + } + + nebula::cpp2::ErrorCode doExecute(PartitionID partId, const Cursor& cursor) override { + auto ret = RelNode::doExecute(partId); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + return ret; + } + + std::string start; + std::string prefix = NebulaKeyUtils::edgePrefix(partId); + if (cursor.empty()) { + start = prefix; + } else { + start = cursor; + } + + std::unique_ptr iter; + auto kvRet = context_->env()->kvstore_->rangeWithPrefix( + context_->spaceId(), partId, start, prefix, &iter, enableReadFollower_); + if (kvRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + return kvRet; + } + + auto rowLimit = limit_; + auto vIdLen = context_->vIdLen(); + auto isIntId = context_->isIntId(); + for (; iter->valid() && 
static_cast(resultDataSet_->rowSize()) < rowLimit; + iter->next()) { + auto key = iter->key(); + if (!NebulaKeyUtils::isEdge(vIdLen, key)) { + continue; + } + auto edgeType = NebulaKeyUtils::getEdgeType(vIdLen, key); + auto edgeNodeIndex = edgeNodesIndex_.find(edgeType); + if (edgeNodeIndex == edgeNodesIndex_.end()) { + continue; + } + auto value = iter->val(); + edgeNodes_[edgeNodeIndex->second]->doExecute(key.toString(), value.toString()); + collectOneRow(isIntId, vIdLen); + } + + cpp2::ScanCursor c; + if (iter->valid()) { + c.set_has_next(true); + c.set_next_cursor(iter->key().str()); + } else { + c.set_has_next(false); + } + cursors_->emplace(partId, std::move(c)); + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + + void collectOneRow(bool isIntId, std::size_t vIdLen) { + List row; + nebula::cpp2::ErrorCode ret = nebula::cpp2::ErrorCode::SUCCEEDED; + for (auto& edgeNode : edgeNodes_) { + ret = edgeNode->collectEdgePropsIfValid( + [&row](const std::vector* props) -> nebula::cpp2::ErrorCode { + for (const auto& prop : *props) { + if (prop.returned_) { + row.emplace_back(Value()); + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; + }, + [&row, vIdLen, isIntId]( + folly::StringPiece key, + RowReader* reader, + const std::vector* props) -> nebula::cpp2::ErrorCode { + if (!QueryUtils::collectEdgeProps(key, vIdLen, isIntId, reader, props, row).ok()) { + return nebula::cpp2::ErrorCode::E_EDGE_PROP_NOT_FOUND; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; + }); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + break; + } + } + if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { + resultDataSet_->rows.emplace_back(std::move(row)); + } + for (auto& edgeNode : edgeNodes_) { + edgeNode->clear(); + } + } + + private: + RuntimeContext* context_; + std::vector> edgeNodes_; + std::unordered_map edgeNodesIndex_; + bool enableReadFollower_; + int64_t limit_; + // cursors for next scan + std::unordered_map* cursors_; + nebula::DataSet* resultDataSet_; +}; + +} // namespace 
storage +} // namespace nebula diff --git a/src/storage/exec/TagNode.h b/src/storage/exec/TagNode.h index e203b494b6c..d6d597addc8 100644 --- a/src/storage/exec/TagNode.h +++ b/src/storage/exec/TagNode.h @@ -53,8 +53,7 @@ class TagNode final : public IterateNode { key_ = NebulaKeyUtils::vertexKey(context_->vIdLen(), partId, vId, tagId_); ret = context_->env()->kvstore_->get(context_->spaceId(), partId, key_, &value_); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { - resetReader(); - return nebula::cpp2::ErrorCode::SUCCEEDED; + return doExecute(key_, value_); } else if (ret == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { // regard key not found as succeed as well, upper node will handle it return nebula::cpp2::ErrorCode::SUCCEEDED; @@ -62,6 +61,13 @@ class TagNode final : public IterateNode { return ret; } + nebula::cpp2::ErrorCode doExecute(const std::string& key, const std::string& value) { + key_ = key; + value_ = value; + resetReader(); + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + nebula::cpp2::ErrorCode collectTagPropsIfValid(NullHandler nullHandler, PropHandler valueHandler) { if (!valid()) { @@ -83,7 +89,16 @@ class TagNode final : public IterateNode { RowReader* reader() const override { return reader_.get(); } - const std::string& getTagName() { return tagName_; } + const std::string& getTagName() const { return tagName_; } + + TagID tagId() const { return tagId_; } + + void clear() { + valid_ = false; + key_.clear(); + value_.clear(); + reader_.reset(); + } private: void resetReader() { diff --git a/src/storage/query/ScanEdgeProcessor.cpp b/src/storage/query/ScanEdgeProcessor.cpp index 61a4c2f9770..5da9b6425e6 100644 --- a/src/storage/query/ScanEdgeProcessor.cpp +++ b/src/storage/query/ScanEdgeProcessor.cpp @@ -24,81 +24,35 @@ void ScanEdgeProcessor::process(const cpp2::ScanEdgeRequest& req) { void ScanEdgeProcessor::doProcess(const cpp2::ScanEdgeRequest& req) { spaceId_ = req.get_space_id(); - partId_ = req.get_part_id(); + enableReadFollower_ = 
req.get_enable_read_from_follower(); + limit_ = req.get_limit(); auto retCode = getSpaceVidLen(spaceId_); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - pushResultCode(retCode, partId_); + for (auto& p : req.get_parts()) { + pushResultCode(retCode, p.first); + } onFinished(); return; } + this->planContext_ = std::make_unique( + this->env_, spaceId_, this->spaceVidLen_, this->isIntId_, req.common_ref()); + retCode = checkAndBuildContexts(req); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - pushResultCode(retCode, partId_); + for (auto& p : req.get_parts()) { + pushResultCode(retCode, p.first); + } onFinished(); return; } - std::string start; - std::string prefix = NebulaKeyUtils::edgePrefix(partId_); - if (req.get_cursor() == nullptr || req.get_cursor()->empty()) { - start = prefix; + if (!FLAGS_query_concurrently) { + runInSingleThread(req); } else { - start = *req.get_cursor(); + runInMultipleThread(req); } - - std::unique_ptr iter; - auto kvRet = env_->kvstore_->rangeWithPrefix( - spaceId_, partId_, start, prefix, &iter, req.get_enable_read_from_follower()); - if (kvRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(kvRet, spaceId_, partId_); - onFinished(); - return; - } - - auto rowLimit = req.get_limit(); - RowReaderWrapper reader; - - for (int64_t rowCount = 0; iter->valid() && rowCount < rowLimit; iter->next()) { - auto key = iter->key(); - if (!NebulaKeyUtils::isEdge(spaceVidLen_, key)) { - continue; - } - - auto edgeType = NebulaKeyUtils::getEdgeType(spaceVidLen_, key); - auto edgeIter = edgeContext_.indexMap_.find(edgeType); - if (edgeIter == edgeContext_.indexMap_.end()) { - continue; - } - - auto val = iter->val(); - auto schemaIter = edgeContext_.schemas_.find(std::abs(edgeType)); - CHECK(schemaIter != edgeContext_.schemas_.end()); - reader.reset(schemaIter->second, val); - if (!reader) { - continue; - } - - nebula::List list; - auto idx = edgeIter->second; - auto props = &(edgeContext_.propContexts_[idx].second); - if 
(!QueryUtils::collectEdgeProps(key, spaceVidLen_, isIntId_, reader.get(), props, list) - .ok()) { - continue; - } - resultDataSet_.rows.emplace_back(std::move(list)); - rowCount++; - } - - if (iter->valid()) { - resp_.set_has_next(true); - resp_.set_next_cursor(iter->key().str()); - } else { - resp_.set_has_next(false); - } - onProcessFinished(); - onFinished(); } nebula::cpp2::ErrorCode ScanEdgeProcessor::checkAndBuildContexts(const cpp2::ScanEdgeRequest& req) { @@ -123,7 +77,100 @@ void ScanEdgeProcessor::buildEdgeColName(const std::vector& edge } } -void ScanEdgeProcessor::onProcessFinished() { resp_.set_edge_data(std::move(resultDataSet_)); } +void ScanEdgeProcessor::onProcessFinished() { + resp_.set_edge_data(std::move(resultDataSet_)); + resp_.set_cursors(std::move(cursors_)); +} + +StoragePlan ScanEdgeProcessor::buildPlan( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors) { + StoragePlan plan; + std::vector> edges; + for (const auto& ec : edgeContext_.propContexts_) { + edges.emplace_back( + std::make_unique(context, &edgeContext_, ec.first, &ec.second)); + } + auto output = std::make_unique( + context, std::move(edges), enableReadFollower_, limit_, cursors, result); + + plan.addNode(std::move(output)); + return plan; +} + +folly::Future> ScanEdgeProcessor::runInExecutor( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors, + PartitionID partId, + Cursor cursor) { + return folly::via(executor_, + [this, context, result, cursors, partId, input = std::move(cursor)]() { + auto plan = buildPlan(context, result, cursors); + + auto ret = plan.go(partId, input); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + return std::make_pair(ret, partId); + } + return std::make_pair(nebula::cpp2::ErrorCode::SUCCEEDED, partId); + }); +} + +void ScanEdgeProcessor::runInSingleThread(const cpp2::ScanEdgeRequest& req) { + contexts_.emplace_back(RuntimeContext(planContext_.get())); + std::unordered_set 
failedParts; + auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_); + for (const auto& partEntry : req.get_parts()) { + auto partId = partEntry.first; + auto cursor = partEntry.second; + + auto ret = plan.go(partId, cursor.get_has_next() ? *cursor.get_next_cursor() : ""); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED && + failedParts.find(partId) == failedParts.end()) { + failedParts.emplace(partId); + handleErrorCode(ret, spaceId_, partId); + } + } + onProcessFinished(); + onFinished(); +} + +void ScanEdgeProcessor::runInMultipleThread(const cpp2::ScanEdgeRequest& req) { + cursorsOfPart_.resize(req.get_parts().size()); + for (size_t i = 0; i < req.get_parts().size(); i++) { + nebula::DataSet result = resultDataSet_; + results_.emplace_back(std::move(result)); + contexts_.emplace_back(RuntimeContext(planContext_.get())); + } + size_t i = 0; + std::vector>> futures; + for (const auto& [partId, cursor] : req.get_parts()) { + futures.emplace_back(runInExecutor(&contexts_[i], + &results_[i], + &cursorsOfPart_[i], + partId, + cursor.get_has_next() ? 
*cursor.get_next_cursor() : "")); + i++; + } + + folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable { + CHECK(!t.hasException()); + const auto& tries = t.value(); + for (size_t j = 0; j < tries.size(); j++) { + CHECK(!tries[j].hasException()); + const auto& [code, partId] = tries[j].value(); + if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(code, spaceId_, partId); + } else { + resultDataSet_.append(std::move(results_[j])); + cursors_.merge(std::move(cursorsOfPart_[j])); + } + } + this->onProcessFinished(); + this->onFinished(); + }); +} } // namespace storage } // namespace nebula diff --git a/src/storage/query/ScanEdgeProcessor.h b/src/storage/query/ScanEdgeProcessor.h index f5afef9934c..f1931cd881b 100644 --- a/src/storage/query/ScanEdgeProcessor.h +++ b/src/storage/query/ScanEdgeProcessor.h @@ -7,6 +7,8 @@ #define STORAGE_QUERY_SCANEDGEPROCESSOR_H_ #include "common/base/Base.h" +#include "storage/exec/ScanNode.h" +#include "storage/exec/StoragePlan.h" #include "storage/query/QueryBaseProcessor.h" namespace nebula { @@ -35,9 +37,30 @@ class ScanEdgeProcessor : public QueryBaseProcessor& edgeProps); + StoragePlan buildPlan(RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors); + + folly::Future> runInExecutor( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors, + PartitionID partId, + Cursor cursor); + + void runInSingleThread(const cpp2::ScanEdgeRequest& req); + + void runInMultipleThread(const cpp2::ScanEdgeRequest& req); + void onProcessFinished() override; - PartitionID partId_; + std::vector contexts_; + std::vector results_; + std::vector> cursorsOfPart_; + + std::unordered_map cursors_; + int64_t limit_{-1}; + bool enableReadFollower_{false}; }; } // namespace storage diff --git a/src/storage/query/ScanVertexProcessor.cpp b/src/storage/query/ScanVertexProcessor.cpp index ee9e3981f70..032ec103660 100644 --- a/src/storage/query/ScanVertexProcessor.cpp 
+++ b/src/storage/query/ScanVertexProcessor.cpp @@ -24,77 +24,35 @@ void ScanVertexProcessor::process(const cpp2::ScanVertexRequest& req) { void ScanVertexProcessor::doProcess(const cpp2::ScanVertexRequest& req) { spaceId_ = req.get_space_id(); - partId_ = req.get_part_id(); + limit_ = req.get_limit(); + enableReadFollower_ = req.get_enable_read_from_follower(); auto retCode = getSpaceVidLen(spaceId_); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - pushResultCode(retCode, partId_); + for (const auto& p : req.get_parts()) { + pushResultCode(retCode, p.first); + } onFinished(); return; } + this->planContext_ = std::make_unique( + this->env_, spaceId_, this->spaceVidLen_, this->isIntId_, req.common_ref()); + retCode = checkAndBuildContexts(req); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - pushResultCode(retCode, partId_); - onFinished(); - return; - } - - std::string start; - std::string prefix = NebulaKeyUtils::vertexPrefix(partId_); - if (req.get_cursor() == nullptr || req.get_cursor()->empty()) { - start = prefix; - } else { - start = *req.get_cursor(); - } - - std::unique_ptr iter; - auto kvRet = env_->kvstore_->rangeWithPrefix( - spaceId_, partId_, start, prefix, &iter, req.get_enable_read_from_follower()); - if (kvRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(kvRet, spaceId_, partId_); + for (const auto& p : req.get_parts()) { + pushResultCode(retCode, p.first); + } onFinished(); return; } - auto rowLimit = req.get_limit(); - RowReaderWrapper reader; - for (int64_t rowCount = 0; iter->valid() && rowCount < rowLimit; iter->next()) { - auto key = iter->key(); - - auto tagId = NebulaKeyUtils::getTagId(spaceVidLen_, key); - auto tagIter = tagContext_.indexMap_.find(tagId); - if (tagIter == tagContext_.indexMap_.end()) { - continue; - } - - auto val = iter->val(); - auto schemaIter = tagContext_.schemas_.find(tagId); - CHECK(schemaIter != tagContext_.schemas_.end()); - reader.reset(schemaIter->second, val); - if (!reader) { - 
continue; - } - - nebula::List list; - auto idx = tagIter->second; - auto props = &(tagContext_.propContexts_[idx].second); - if (!QueryUtils::collectVertexProps(key, spaceVidLen_, isIntId_, reader.get(), props, list) - .ok()) { - continue; - } - resultDataSet_.rows.emplace_back(std::move(list)); - rowCount++; - } - - if (iter->valid()) { - resp_.set_has_next(true); - resp_.set_next_cursor(iter->key().str()); + if (!FLAGS_query_concurrently) { + runInSingleThread(req); } else { - resp_.set_has_next(false); + runInMultipleThread(req); } - onProcessFinished(); - onFinished(); } nebula::cpp2::ErrorCode ScanVertexProcessor::checkAndBuildContexts( @@ -104,13 +62,14 @@ nebula::cpp2::ErrorCode ScanVertexProcessor::checkAndBuildContexts( return ret; } - std::vector returnProps = {*req.return_columns_ref()}; + std::vector returnProps = *req.return_columns_ref(); ret = handleVertexProps(returnProps); buildTagColName(returnProps); return ret; } void ScanVertexProcessor::buildTagColName(const std::vector& tagProps) { + resultDataSet_.colNames.emplace_back(kVid); for (const auto& tagProp : tagProps) { auto tagId = tagProp.get_tag(); auto tagName = tagContext_.tagNames_[tagId]; @@ -120,7 +79,99 @@ void ScanVertexProcessor::buildTagColName(const std::vector& t } } -void ScanVertexProcessor::onProcessFinished() { resp_.set_vertex_data(std::move(resultDataSet_)); } +void ScanVertexProcessor::onProcessFinished() { + resp_.set_vertex_data(std::move(resultDataSet_)); + resp_.set_cursors(std::move(cursors_)); +} + +StoragePlan ScanVertexProcessor::buildPlan( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors) { + StoragePlan plan; + std::vector> tags; + for (const auto& tc : tagContext_.propContexts_) { + tags.emplace_back(std::make_unique(context, &tagContext_, tc.first, &tc.second)); + } + auto output = std::make_unique( + context, std::move(tags), enableReadFollower_, limit_, cursors, result); + + plan.addNode(std::move(output)); + return plan; +} + 
+folly::Future> ScanVertexProcessor::runInExecutor( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursorsOfPart, + PartitionID partId, + Cursor cursor) { + return folly::via(executor_, + [this, context, result, cursorsOfPart, partId, input = std::move(cursor)]() { + auto plan = buildPlan(context, result, cursorsOfPart); + + auto ret = plan.go(partId, input); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + return std::make_pair(ret, partId); + } + return std::make_pair(nebula::cpp2::ErrorCode::SUCCEEDED, partId); + }); +} + +void ScanVertexProcessor::runInSingleThread(const cpp2::ScanVertexRequest& req) { + contexts_.emplace_back(RuntimeContext(planContext_.get())); + std::unordered_set failedParts; + auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_); + for (const auto& partEntry : req.get_parts()) { + auto partId = partEntry.first; + auto cursor = partEntry.second; + + auto ret = plan.go(partId, cursor.get_has_next() ? *cursor.get_next_cursor() : ""); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED && + failedParts.find(partId) == failedParts.end()) { + failedParts.emplace(partId); + handleErrorCode(ret, spaceId_, partId); + } + } + onProcessFinished(); + onFinished(); +} + +void ScanVertexProcessor::runInMultipleThread(const cpp2::ScanVertexRequest& req) { + cursorsOfPart_.resize(req.get_parts().size()); + for (size_t i = 0; i < req.get_parts().size(); i++) { + nebula::DataSet result = resultDataSet_; + results_.emplace_back(std::move(result)); + contexts_.emplace_back(RuntimeContext(planContext_.get())); + } + size_t i = 0; + std::vector>> futures; + for (const auto& [partId, cursor] : req.get_parts()) { + futures.emplace_back(runInExecutor(&contexts_[i], + &results_[i], + &cursorsOfPart_[i], + partId, + cursor.get_has_next() ? 
*cursor.get_next_cursor() : "")); + i++; + } + + folly::collectAll(futures).via(executor_).thenTry([this](auto&& t) mutable { + CHECK(!t.hasException()); + const auto& tries = t.value(); + for (size_t j = 0; j < tries.size(); j++) { + CHECK(!tries[j].hasException()); + const auto& [code, partId] = tries[j].value(); + if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { + handleErrorCode(code, spaceId_, partId); + } else { + resultDataSet_.append(std::move(results_[j])); + cursors_.merge(std::move(cursorsOfPart_[j])); + } + } + this->onProcessFinished(); + this->onFinished(); + }); +} } // namespace storage } // namespace nebula diff --git a/src/storage/query/ScanVertexProcessor.h b/src/storage/query/ScanVertexProcessor.h index d6c988adf43..987c77e2edd 100644 --- a/src/storage/query/ScanVertexProcessor.h +++ b/src/storage/query/ScanVertexProcessor.h @@ -7,6 +7,8 @@ #define STORAGE_QUERY_SCANVERTEXPROCESSOR_H_ #include "common/base/Base.h" +#include "storage/exec/ScanNode.h" +#include "storage/exec/StoragePlan.h" #include "storage/query/QueryBaseProcessor.h" namespace nebula { @@ -36,10 +38,31 @@ class ScanVertexProcessor void buildTagColName(const std::vector& tagProps); + StoragePlan buildPlan(RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors); + + folly::Future> runInExecutor( + RuntimeContext* context, + nebula::DataSet* result, + std::unordered_map* cursors, + PartitionID partId, + Cursor cursor); + + void runInSingleThread(const cpp2::ScanVertexRequest& req); + + void runInMultipleThread(const cpp2::ScanVertexRequest& req); + void onProcessFinished() override; private: - PartitionID partId_; + std::vector contexts_; + std::vector results_; + std::vector> cursorsOfPart_; + + std::unordered_map cursors_; + int64_t limit_{-1}; + bool enableReadFollower_{false}; }; } // namespace storage diff --git a/src/storage/test/ScanEdgeTest.cpp b/src/storage/test/ScanEdgeTest.cpp index 7155534c496..3ed3d41a9dc 100644 --- 
a/src/storage/test/ScanEdgeTest.cpp +++ b/src/storage/test/ScanEdgeTest.cpp @@ -3,6 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ +#include #include #include "common/base/Base.h" @@ -13,8 +14,8 @@ namespace nebula { namespace storage { -cpp2::ScanEdgeRequest buildRequest(PartitionID partId, - const std::string& cursor, +cpp2::ScanEdgeRequest buildRequest(std::vector partIds, + std::vector cursors, const std::pair>& edge, int64_t rowLimit = 100, int64_t startTime = 0, @@ -22,8 +23,15 @@ cpp2::ScanEdgeRequest buildRequest(PartitionID partId, bool onlyLatestVer = false) { cpp2::ScanEdgeRequest req; req.set_space_id(1); - req.set_part_id(partId); - req.set_cursor(cursor); + cpp2::ScanCursor c; + CHECK_EQ(partIds.size(), cursors.size()); + std::unordered_map parts; + for (std::size_t i = 0; i < partIds.size(); ++i) { + c.set_has_next(!cursors[i].empty()); + c.set_next_cursor(cursors[i]); + parts.emplace(partIds[i], c); + } + req.set_parts(std::move(parts)); EdgeType edgeType = edge.first; cpp2::EdgeProp edgeProp; edgeProp.set_type(edgeType); @@ -96,7 +104,7 @@ TEST(ScanEdgeTest, PropertyTest) { serve, std::vector{kSrc, kType, kRank, kDst, "teamName", "startYear", "endYear"}); for (PartitionID partId = 1; partId <= totalParts; partId++) { - auto req = buildRequest(partId, "", edge); + auto req = buildRequest({partId}, {""}, edge); auto* processor = ScanEdgeProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); @@ -112,7 +120,7 @@ TEST(ScanEdgeTest, PropertyTest) { size_t totalRowCount = 0; auto edge = std::make_pair(serve, std::vector{}); for (PartitionID partId = 1; partId <= totalParts; partId++) { - auto req = buildRequest(partId, "", edge); + auto req = buildRequest({partId}, {""}, edge); auto* processor = ScanEdgeProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); @@ -147,7 +155,7 @@ TEST(ScanEdgeTest, CursorTest) { bool hasNext = true; std::string cursor = ""; while 
(hasNext) { - auto req = buildRequest(partId, cursor, edge, 5); + auto req = buildRequest({partId}, {cursor}, edge, 5); auto* processor = ScanEdgeProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); @@ -155,10 +163,10 @@ TEST(ScanEdgeTest, CursorTest) { ASSERT_EQ(0, resp.result.failed_parts.size()); checkResponse(*resp.edge_data_ref(), edge, edge.second.size(), totalRowCount); - hasNext = resp.get_has_next(); + hasNext = resp.get_cursors().at(partId).get_has_next(); if (hasNext) { - CHECK(resp.next_cursor_ref().has_value()); - cursor = *resp.next_cursor_ref(); + CHECK(resp.get_cursors().at(partId).next_cursor_ref().has_value()); + cursor = *resp.get_cursors().at(partId).next_cursor_ref(); } } } @@ -174,7 +182,7 @@ TEST(ScanEdgeTest, CursorTest) { bool hasNext = true; std::string cursor = ""; while (hasNext) { - auto req = buildRequest(partId, cursor, edge, 1); + auto req = buildRequest({partId}, {cursor}, edge, 1); auto* processor = ScanEdgeProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); @@ -182,10 +190,10 @@ TEST(ScanEdgeTest, CursorTest) { ASSERT_EQ(0, resp.result.failed_parts.size()); checkResponse(*resp.edge_data_ref(), edge, edge.second.size(), totalRowCount); - hasNext = resp.get_has_next(); + hasNext = resp.get_cursors().at(partId).get_has_next(); if (hasNext) { - CHECK(resp.next_cursor_ref().has_value()); - cursor = *resp.next_cursor_ref(); + CHECK(resp.get_cursors().at(partId).next_cursor_ref().has_value()); + cursor = *resp.get_cursors().at(partId).next_cursor_ref(); } } } @@ -193,6 +201,94 @@ TEST(ScanEdgeTest, CursorTest) { } } +TEST(ScanEdgeTest, MultiplePartsTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, 
QueryTestUtils::mockEdgeData(env, totalParts)); + + EdgeType serve = 101; + + { + LOG(INFO) << "Scan one edge with some properties in one batch"; + size_t totalRowCount = 0; + auto edge = std::make_pair( + serve, + std::vector{kSrc, kType, kRank, kDst, "teamName", "startYear", "endYear"}); + auto req = buildRequest({1, 3}, {"", ""}, edge); + auto* processor = ScanEdgeProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + checkResponse(*resp.edge_data_ref(), edge, edge.second.size(), totalRowCount); + } + { + LOG(INFO) << "Scan one edge with all properties in one batch"; + size_t totalRowCount = 0; + auto edge = std::make_pair(serve, std::vector{}); + auto req = buildRequest({1, 3}, {"", ""}, edge); + auto* processor = ScanEdgeProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + // all 9 columns in value + checkResponse(*resp.edge_data_ref(), edge, 9, totalRowCount); + } +} + +TEST(ScanEdgeTest, LimitTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + EdgeType serve = 101; + + { + LOG(INFO) << "Scan one edge with some properties in one batch"; + constexpr std::size_t limit = 3; + size_t totalRowCount = 0; + auto edge = std::make_pair( + serve, + std::vector{kSrc, kType, kRank, kDst, "teamName", "startYear", "endYear"}); + auto req = buildRequest({1}, {""}, edge, limit); + auto* processor = ScanEdgeProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = 
std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + checkResponse(*resp.edge_data_ref(), edge, edge.second.size(), totalRowCount); + EXPECT_EQ(totalRowCount, limit); + } + { + LOG(INFO) << "Scan one edge with all properties in one batch"; + constexpr std::size_t limit = 3; + size_t totalRowCount = 0; + auto edge = std::make_pair(serve, std::vector{}); + auto req = buildRequest({1}, {""}, edge, limit); + auto* processor = ScanEdgeProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + // all 9 columns in value + checkResponse(*resp.edge_data_ref(), edge, 9, totalRowCount); + EXPECT_EQ(totalRowCount, limit); + } +} + } // namespace storage } // namespace nebula diff --git a/src/storage/test/ScanVertexTest.cpp b/src/storage/test/ScanVertexTest.cpp index 6e2cb6f8ae4..f582848ad92 100644 --- a/src/storage/test/ScanVertexTest.cpp +++ b/src/storage/test/ScanVertexTest.cpp @@ -3,6 +3,7 @@ * This source code is licensed under Apache 2.0 License. 
*/ +#include #include #include "common/base/Base.h" @@ -13,24 +14,36 @@ namespace nebula { namespace storage { -cpp2::ScanVertexRequest buildRequest(PartitionID partId, - const std::string& cursor, - const std::pair>& tag, - int64_t rowLimit = 100, - int64_t startTime = 0, - int64_t endTime = std::numeric_limits::max(), - bool onlyLatestVer = false) { +cpp2::ScanVertexRequest buildRequest( + std::vector partIds, + std::vector cursors, + const std::vector>>& tags, + int64_t rowLimit = 100, + int64_t startTime = 0, + int64_t endTime = std::numeric_limits::max(), + bool onlyLatestVer = false) { cpp2::ScanVertexRequest req; req.set_space_id(1); - req.set_part_id(partId); - req.set_cursor(cursor); - TagID tagId = tag.first; - cpp2::VertexProp vertexProp; - vertexProp.set_tag(tagId); - for (const auto& prop : tag.second) { - (*vertexProp.props_ref()).emplace_back(std::move(prop)); + cpp2::ScanCursor c; + CHECK_EQ(partIds.size(), cursors.size()); + std::unordered_map parts; + for (std::size_t i = 0; i < partIds.size(); ++i) { + c.set_has_next(!cursors[i].empty()); + c.set_next_cursor(cursors[i]); + parts.emplace(partIds[i], c); } - req.set_return_columns(std::move(vertexProp)); + req.set_parts(std::move(parts)); + std::vector vertexProps; + for (const auto& tag : tags) { + TagID tagId = tag.first; + cpp2::VertexProp vertexProp; + vertexProp.set_tag(tagId); + for (const auto& prop : tag.second) { + (*vertexProp.props_ref()).emplace_back(std::move(prop)); + } + vertexProps.emplace_back(std::move(vertexProp)); + } + req.set_return_columns(std::move(vertexProps)); req.set_limit(rowLimit); req.set_start_time(startTime); req.set_end_time(endTime); @@ -44,9 +57,10 @@ void checkResponse(const nebula::DataSet& dataSet, size_t& totalRowCount) { ASSERT_EQ(dataSet.colNames.size(), expectColumnCount); if (!tag.second.empty()) { - ASSERT_EQ(dataSet.colNames.size(), tag.second.size()); - for (size_t i = 0; i < dataSet.colNames.size(); i++) { - ASSERT_EQ(dataSet.colNames[i], 
std::to_string(tag.first) + "." + tag.second[i]); + ASSERT_EQ(dataSet.colNames.size(), tag.second.size() + 1 /* kVid*/); + for (size_t i = 0; i < dataSet.colNames.size() - 1 /* kVid */; i++) { + ASSERT_EQ(dataSet.colNames[i + 1 /* kVid */], + std::to_string(tag.first) + "." + tag.second[i]); } } totalRowCount += dataSet.rows.size(); @@ -63,13 +77,17 @@ void checkResponse(const nebula::DataSet& dataSet, mock::MockData::players_.end(), [&](const auto& player) { return player.name_ == vId; }); CHECK(iter != mock::MockData::players_.end()); - QueryTestUtils::checkPlayer(props, *iter, row.values); + std::vector returnProps({kVid}); + returnProps.insert(returnProps.end(), props.begin(), props.end()); + QueryTestUtils::checkPlayer(returnProps, *iter, row.values); break; } case 2: { // tag team auto iter = std::find(mock::MockData::teams_.begin(), mock::MockData::teams_.end(), vId); - QueryTestUtils::checkTeam(props, *iter, row.values); + std::vector returnProps({kVid}); + returnProps.insert(returnProps.end(), props.begin(), props.end()); + QueryTestUtils::checkTeam(returnProps, *iter, row.values); break; } default: @@ -95,14 +113,14 @@ TEST(ScanVertexTest, PropertyTest) { auto tag = std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); for (PartitionID partId = 1; partId <= totalParts; partId++) { - auto req = buildRequest(partId, "", tag); + auto req = buildRequest({partId}, {""}, {tag}); auto* processor = ScanVertexProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); auto resp = std::move(f).get(); ASSERT_EQ(0, resp.result.failed_parts.size()); - checkResponse(*resp.vertex_data_ref(), tag, tag.second.size(), totalRowCount); + checkResponse(*resp.vertex_data_ref(), tag, tag.second.size() + 1 /* kVid */, totalRowCount); } CHECK_EQ(mock::MockData::players_.size(), totalRowCount); } @@ -110,8 +128,20 @@ TEST(ScanVertexTest, PropertyTest) { LOG(INFO) << "Scan one tag with all properties in one batch"; size_t 
totalRowCount = 0; auto tag = std::make_pair(player, std::vector{}); + auto respTag = std::make_pair(player, + std::vector{"name", + "age", + "playing", + "career", + "startYear", + "endYear", + "games", + "avgScore", + "serveTeams", + "country", + "champions"}); for (PartitionID partId = 1; partId <= totalParts; partId++) { - auto req = buildRequest(partId, "", tag); + auto req = buildRequest({partId}, {""}, {tag}); auto* processor = ScanVertexProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); @@ -119,7 +149,7 @@ TEST(ScanVertexTest, PropertyTest) { ASSERT_EQ(0, resp.result.failed_parts.size()); // all 11 columns in value - checkResponse(*resp.vertex_data_ref(), tag, 11, totalRowCount); + checkResponse(*resp.vertex_data_ref(), respTag, 11 + 1 /* kVid */, totalRowCount); } CHECK_EQ(mock::MockData::players_.size(), totalRowCount); } @@ -145,18 +175,19 @@ TEST(ScanVertexTest, CursorTest) { bool hasNext = true; std::string cursor = ""; while (hasNext) { - auto req = buildRequest(partId, cursor, tag, 5); + auto req = buildRequest({partId}, {cursor}, {tag}, 5); auto* processor = ScanVertexProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); auto resp = std::move(f).get(); ASSERT_EQ(0, resp.result.failed_parts.size()); - checkResponse(*resp.vertex_data_ref(), tag, tag.second.size(), totalRowCount); - hasNext = resp.get_has_next(); + checkResponse( + *resp.vertex_data_ref(), tag, tag.second.size() + 1 /* kVid */, totalRowCount); + hasNext = resp.get_cursors().at(partId).get_has_next(); if (hasNext) { - CHECK(resp.next_cursor_ref()); - cursor = *resp.next_cursor_ref(); + CHECK(resp.get_cursors().at(partId).next_cursor_ref()); + cursor = *resp.get_cursors().at(partId).next_cursor_ref(); } } } @@ -171,18 +202,19 @@ TEST(ScanVertexTest, CursorTest) { bool hasNext = true; std::string cursor = ""; while (hasNext) { - auto req = buildRequest(partId, cursor, tag, 1); + auto req = 
buildRequest({partId}, {cursor}, {tag}, 1); auto* processor = ScanVertexProcessor::instance(env, nullptr); auto f = processor->getFuture(); processor->process(req); auto resp = std::move(f).get(); ASSERT_EQ(0, resp.result.failed_parts.size()); - checkResponse(*resp.vertex_data_ref(), tag, tag.second.size(), totalRowCount); - hasNext = resp.get_has_next(); + checkResponse( + *resp.vertex_data_ref(), tag, tag.second.size() + 1 /* kVid */, totalRowCount); + hasNext = resp.get_cursors().at(partId).get_has_next(); if (hasNext) { - CHECK(resp.next_cursor_ref()); - cursor = *resp.next_cursor_ref(); + CHECK(resp.get_cursors().at(partId).next_cursor_ref()); + cursor = *resp.get_cursors().at(partId).next_cursor_ref(); } } } @@ -190,6 +222,221 @@ TEST(ScanVertexTest, CursorTest) { } } +TEST(ScanVertexTest, MultiplePartsTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + TagID player = 1; + + { + LOG(INFO) << "Scan one tag with some properties in one batch"; + size_t totalRowCount = 0; + auto tag = + std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); + auto req = buildRequest({1, 3}, {"", ""}, {tag}); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + checkResponse(*resp.vertex_data_ref(), tag, tag.second.size() + 1 /* kVid */, totalRowCount); + } + { + LOG(INFO) << "Scan one tag with all properties in one batch"; + size_t totalRowCount = 0; + auto tag = std::make_pair(player, std::vector{}); + auto respTag = std::make_pair(player, + std::vector{"name", + "age", + "playing", + "career", + 
"startYear", + "endYear", + "games", + "avgScore", + "serveTeams", + "country", + "champions"}); + auto req = buildRequest({1, 3}, {"", ""}, {tag}); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + // all 11 columns in value + checkResponse(*resp.vertex_data_ref(), respTag, 11 + 1 /* kVid */, totalRowCount); + } +} + +TEST(ScanVertexTest, LimitTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + TagID player = 1; + + { + LOG(INFO) << "Scan one tag with some properties in one batch"; + constexpr std::size_t limit = 3; + size_t totalRowCount = 0; + auto tag = + std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); + auto req = buildRequest({2}, {""}, {tag}, limit); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + checkResponse(*resp.vertex_data_ref(), tag, tag.second.size() + 1 /* kVid */, totalRowCount); + EXPECT_EQ(totalRowCount, limit); + } + { + LOG(INFO) << "Scan one tag with all properties in one batch"; + constexpr std::size_t limit = 3; + size_t totalRowCount = 0; + auto tag = std::make_pair(player, std::vector{}); + auto respTag = std::make_pair(player, + std::vector{"name", + "age", + "playing", + "career", + "startYear", + "endYear", + "games", + "avgScore", + "serveTeams", + "country", + "champions"}); + auto req = buildRequest({2}, {""}, {tag}, limit); + auto* processor = ScanVertexProcessor::instance(env, 
nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + // all 11 columns in value + checkResponse(*resp.vertex_data_ref(), respTag, 11 + 1 /* kVid */, totalRowCount); + EXPECT_EQ(totalRowCount, limit); + } +} + +TEST(ScanVertexTest, MultipleTagsTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + TagID player = 1; + TagID team = 2; + + { + LOG(INFO) << "Scan one tag with some properties in one batch"; + // size_t totalRowCount = 0; + auto playerTag = + std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); + auto teamTag = std::make_pair(team, std::vector{kTag, "name"}); + auto req = buildRequest({1}, {""}, {playerTag, teamTag}); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + nebula::DataSet expect( + {"_vid", "1._vid", "1._tag", "1.name", "1.age", "1.avgScore", "2._tag", "2.name"}); + expect.emplace_back(List({"Bulls", + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + 2, + "Bulls"})); + expect.emplace_back(List({"Cavaliers", + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + 2, + "Cavaliers"})); + expect.emplace_back(List({"Damian Lillard", + "Damian Lillard", + 1, + "Damian Lillard", + 29, + 24, + Value::kEmpty, + Value::kEmpty})); + expect.emplace_back(List( + {"Jason Kidd", "Jason Kidd", 1, "Jason Kidd", 47, 12.6, Value::kEmpty, Value::kEmpty})); + expect.emplace_back(List( + {"Kevin Durant", "Kevin 
Durant", 1, "Kevin Durant", 31, 27, Value::kEmpty, Value::kEmpty})); + expect.emplace_back(List( + {"Kobe Bryant", "Kobe Bryant", 1, "Kobe Bryant", 41, 25, Value::kEmpty, Value::kEmpty})); + expect.emplace_back(List({"Kristaps Porzingis", + "Kristaps Porzingis", + 1, + "Kristaps Porzingis", + 24, + 18.1, + Value::kEmpty, + Value::kEmpty})); + expect.emplace_back(List( + {"Luka Doncic", "Luka Doncic", 1, "Luka Doncic", 21, 24.4, Value::kEmpty, Value::kEmpty})); + expect.emplace_back(List({"Mavericks", + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + 2, + "Mavericks"})); + expect.emplace_back(List({"Nuggets", + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + Value::kEmpty, + 2, + "Nuggets"})); + expect.emplace_back(List( + {"Paul George", "Paul George", 1, "Paul George", 30, 19.9, Value::kEmpty, Value::kEmpty})); + expect.emplace_back(List({"Tracy McGrady", + "Tracy McGrady", + 1, + "Tracy McGrady", + 41, + 19.6, + Value::kEmpty, + Value::kEmpty})); + expect.emplace_back(List({"Vince Carter", + "Vince Carter", + 1, + "Vince Carter", + 43, + 16.7, + Value::kEmpty, + Value::kEmpty})); + EXPECT_EQ(expect, *resp.vertex_data_ref()); + } +} + } // namespace storage } // namespace nebula From a5ca6e50e518127b2eb474af2281bbc20b626617 Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Fri, 12 Nov 2021 09:00:03 +0800 Subject: [PATCH 11/53] Remove the useless make target (#3226) --- CMakeLists.txt | 7 ------- 1 file changed, 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f2d12a1100..05fe56e0351 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -45,13 +45,6 @@ include(GitHooksConfig) include(GitInfoConfig) include(NebulaCustomTargets) -add_custom_target( - install-all - COMMAND $MAKE install - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} -) - - add_custom_target( clang-format COMMAND "find" "src/" "-type" "f" "\\(" "-iname" "\\*.h" "-o" "-iname" "\\*.cpp" "\\)" "|" "xargs" 
"clang-format" "-i" From 2c1d720bfd0262152e6e18ddf6ca6affc002b2c7 Mon Sep 17 00:00:00 2001 From: "lionel.liu@vesoft.com" <52276794+liuyu85cn@users.noreply.github.com> Date: Fri, 12 Nov 2021 10:31:02 +0800 Subject: [PATCH 12/53] fix a bug may stuck (#3306) * fix a bug may stuck * update Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .../ChainAddEdgesProcessorLocal.cpp | 27 ++++++++++++------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp b/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp index 9d12904535d..8587d5021b8 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp +++ b/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp @@ -172,18 +172,25 @@ folly::SemiFuture ChainAddEdgesProcessorLocal::forwardToDelegateProcessor( }; auto futProc = proc->getFuture(); auto [pro, fut] = folly::makePromiseContract(); - std::move(futProc).thenValue([&, p = std::move(pro)](auto&& resp) mutable { - auto rc = extractRpcError(resp); - if (rc == Code::SUCCEEDED) { - if (FLAGS_trace_toss) { - for (auto& k : kvErased_) { - VLOG(1) << uuid_ << " erase prime " << folly::hexlify(k); + std::move(futProc).thenTry([&, p = std::move(pro)](auto&& t) mutable { + auto rc = Code::SUCCEEDED; + if (t.hasException()) { + LOG(INFO) << "catch ex: " << t.exception().what(); + rc = Code::E_UNKNOWN; + } else { + auto& resp = t.value(); + rc = extractRpcError(resp); + if (rc == Code::SUCCEEDED) { + if (FLAGS_trace_toss) { + for (auto& k : kvErased_) { + VLOG(1) << uuid_ << " erase prime " << folly::hexlify(k); + } } + } else { + VLOG(1) << uuid_ << " forwardToDelegateProcessor(), code = " + << apache::thrift::util::enumNameSafe(rc); + addUnfinishedEdge(ResumeType::RESUME_CHAIN); } - } else { - VLOG(1) << uuid_ - << " forwardToDelegateProcessor(), code = " << apache::thrift::util::enumNameSafe(rc); - addUnfinishedEdge(ResumeType::RESUME_CHAIN); } p.setValue(rc); }); From 
28776aec37821aaf1a92f9f5f06fad3bd205b8bb Mon Sep 17 00:00:00 2001 From: jakevin <30525741+jackwener@users.noreply.github.com> Date: Fri, 12 Nov 2021 17:27:53 +0800 Subject: [PATCH 13/53] remove read lock in meta client (#3256) * add one * test * test * test * add more debug Info * Revert "add more debug Info" This reverts commit 90ef647624cc535363e2d565678ed09137ef83ec. * remove * wip * finish * remove redundant variable * remove annotation * remove more rlock Co-authored-by: cpw <13495049+CPWstatic@users.noreply.github.com> --- src/clients/meta/MetaClient.cpp | 559 +++++++++++--------- src/clients/meta/MetaClient.h | 66 ++- src/graph/service/CloudAuthenticator.cpp | 2 +- src/graph/service/CloudAuthenticator.h | 4 +- src/graph/service/PasswordAuthenticator.cpp | 4 +- src/graph/service/PasswordAuthenticator.h | 4 +- src/graph/service/QueryEngine.h | 2 +- 7 files changed, 373 insertions(+), 268 deletions(-) diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp index ecc8ca83d81..a6c709deae6 100644 --- a/src/clients/meta/MetaClient.cpp +++ b/src/clients/meta/MetaClient.cpp @@ -77,14 +77,8 @@ bool MetaClient::isMetadReady() { } // ready_ will be set in loadData - bool ldRet = loadData(); - bool lcRet = true; - if (!options_.skipConfig_) { - lcRet = loadCfg(); - } - if (ldRet && lcRet) { - localLastUpdateTime_ = metadLastUpdateTime_; - } + loadData(); + loadCfg(); return ready_; } @@ -141,16 +135,8 @@ void MetaClient::heartBeatThreadFunc() { } // if MetaServer has some changes, refesh the localCache_ - if (localLastUpdateTime_ < metadLastUpdateTime_) { - bool ldRet = loadData(); - bool lcRet = true; - if (!options_.skipConfig_) { - lcRet = loadCfg(); - } - if (ldRet && lcRet) { - localLastUpdateTime_ = metadLastUpdateTime_; - } - } + loadData(); + loadCfg(); } bool MetaClient::loadUsersAndRoles() { @@ -179,6 +165,10 @@ bool MetaClient::loadUsersAndRoles() { } bool MetaClient::loadData() { + if (localDataLastUpdateTime_ == metadLastUpdateTime_) { 
+ return true; + } + if (ioThreadPool_->numThreads() <= 0) { LOG(ERROR) << "The threads number in ioThreadPool should be greater than 0"; return false; @@ -305,6 +295,8 @@ bool MetaClient::loadData() { storageHosts_ = std::move(hosts); } + localDataLastUpdateTime_.store(metadLastUpdateTime_.load()); + diff(oldCache, localCache_); listenerDiff(oldCache, localCache_); loadRemoteListeners(); @@ -312,6 +304,78 @@ bool MetaClient::loadData() { return true; } +TagSchemas MetaClient::buildTagSchemas(std::vector tagItemVec, ObjectPool* pool) { + TagSchemas tagSchemas; + TagID lastTagId = -1; + for (auto& tagIt : tagItemVec) { + // meta will return the different version from new to old + auto schema = std::make_shared(tagIt.get_version()); + for (const auto& colIt : tagIt.get_schema().get_columns()) { + addSchemaField(schema.get(), colIt, pool); + } + // handle schema property + schema->setProp(tagIt.get_schema().get_schema_prop()); + if (tagIt.get_tag_id() != lastTagId) { + // init schema vector, since schema version is zero-based, need to add one + tagSchemas[tagIt.get_tag_id()].resize(schema->getVersion() + 1); + lastTagId = tagIt.get_tag_id(); + } + tagSchemas[tagIt.get_tag_id()][schema->getVersion()] = std::move(schema); + } + return tagSchemas; +} + +EdgeSchemas MetaClient::buildEdgeSchemas(std::vector edgeItemVec, + ObjectPool* pool) { + EdgeSchemas edgeSchemas; + std::unordered_set> edges; + EdgeType lastEdgeType = -1; + for (auto& edgeIt : edgeItemVec) { + // meta will return the different version from new to old + auto schema = std::make_shared(edgeIt.get_version()); + for (const auto& col : edgeIt.get_schema().get_columns()) { + MetaClient::addSchemaField(schema.get(), col, pool); + } + // handle shcem property + schema->setProp(edgeIt.get_schema().get_schema_prop()); + if (edgeIt.get_edge_type() != lastEdgeType) { + // init schema vector, since schema version is zero-based, need to add one + edgeSchemas[edgeIt.get_edge_type()].resize(schema->getVersion() + 1); + 
lastEdgeType = edgeIt.get_edge_type(); + } + edgeSchemas[edgeIt.get_edge_type()][schema->getVersion()] = std::move(schema); + } + return edgeSchemas; +} + +void MetaClient::addSchemaField(NebulaSchemaProvider* schema, + const cpp2::ColumnDef& col, + ObjectPool* pool) { + bool hasDef = col.default_value_ref().has_value(); + auto& colType = col.get_type(); + size_t len = colType.type_length_ref().has_value() ? *colType.get_type_length() : 0; + cpp2::GeoShape geoShape = + colType.geo_shape_ref().has_value() ? *colType.get_geo_shape() : cpp2::GeoShape::ANY; + bool nullable = col.nullable_ref().has_value() ? *col.get_nullable() : false; + Expression* defaultValueExpr = nullptr; + if (hasDef) { + auto encoded = *col.get_default_value(); + defaultValueExpr = Expression::decode(pool, folly::StringPiece(encoded.data(), encoded.size())); + + if (defaultValueExpr == nullptr) { + LOG(ERROR) << "Wrong expr default value for column name: " << col.get_name(); + hasDef = false; + } + } + + schema->addField(col.get_name(), + colType.get_type(), + len, + nullable, + hasDef ? defaultValueExpr : nullptr, + geoShape); +} + bool MetaClient::loadSchemas(GraphSpaceID spaceId, std::shared_ptr spaceInfoCache, SpaceTagNameIdMap& tagNameIdMap, @@ -336,52 +400,12 @@ bool MetaClient::loadSchemas(GraphSpaceID spaceId, auto tagItemVec = tagRet.value(); auto edgeItemVec = edgeRet.value(); allEdgeMap[spaceId] = {}; - TagSchemas tagSchemas; - EdgeSchemas edgeSchemas; - TagID lastTagId = -1; - - auto addSchemaField = [&spaceInfoCache](NebulaSchemaProvider* schema, - const cpp2::ColumnDef& col) { - bool hasDef = col.default_value_ref().has_value(); - auto& colType = col.get_type(); - size_t len = colType.type_length_ref().has_value() ? *colType.get_type_length() : 0; - cpp2::GeoShape geoShape = - colType.geo_shape_ref().has_value() ? *colType.get_geo_shape() : cpp2::GeoShape::ANY; - bool nullable = col.nullable_ref().has_value() ? 
*col.get_nullable() : false; - Expression* defaultValueExpr = nullptr; - if (hasDef) { - auto encoded = *col.get_default_value(); - defaultValueExpr = Expression::decode(&(spaceInfoCache->pool_), - folly::StringPiece(encoded.data(), encoded.size())); - - if (defaultValueExpr == nullptr) { - LOG(ERROR) << "Wrong expr default value for column name: " << col.get_name(); - hasDef = false; - } - } - - schema->addField(col.get_name(), - colType.get_type(), - len, - nullable, - hasDef ? defaultValueExpr : nullptr, - geoShape); - }; + spaceInfoCache->tagItemVec_ = tagItemVec; + spaceInfoCache->tagSchemas_ = buildTagSchemas(tagItemVec, &spaceInfoCache->pool_); + spaceInfoCache->edgeItemVec_ = edgeItemVec; + spaceInfoCache->edgeSchemas_ = buildEdgeSchemas(edgeItemVec, &spaceInfoCache->pool_); for (auto& tagIt : tagItemVec) { - // meta will return the different version from new to old - auto schema = std::make_shared(tagIt.get_version()); - for (const auto& colIt : tagIt.get_schema().get_columns()) { - addSchemaField(schema.get(), colIt); - } - // handle schema property - schema->setProp(tagIt.get_schema().get_schema_prop()); - if (tagIt.get_tag_id() != lastTagId) { - // init schema vector, since schema version is zero-based, need to add one - tagSchemas[tagIt.get_tag_id()].resize(schema->getVersion() + 1); - lastTagId = tagIt.get_tag_id(); - } - tagSchemas[tagIt.get_tag_id()][schema->getVersion()] = std::move(schema); tagNameIdMap.emplace(std::make_pair(spaceId, tagIt.get_tag_name()), tagIt.get_tag_id()); tagIdNameMap.emplace(std::make_pair(spaceId, tagIt.get_tag_id()), tagIt.get_tag_name()); // get the latest tag version @@ -398,21 +422,7 @@ bool MetaClient::loadSchemas(GraphSpaceID spaceId, } std::unordered_set> edges; - EdgeType lastEdgeType = -1; for (auto& edgeIt : edgeItemVec) { - // meta will return the different version from new to old - auto schema = std::make_shared(edgeIt.get_version()); - for (const auto& col : edgeIt.get_schema().get_columns()) { - 
addSchemaField(schema.get(), col); - } - // handle shcem property - schema->setProp(edgeIt.get_schema().get_schema_prop()); - if (edgeIt.get_edge_type() != lastEdgeType) { - // init schema vector, since schema version is zero-based, need to add one - edgeSchemas[edgeIt.get_edge_type()].resize(schema->getVersion() + 1); - lastEdgeType = edgeIt.get_edge_type(); - } - edgeSchemas[edgeIt.get_edge_type()][schema->getVersion()] = std::move(schema); edgeNameTypeMap.emplace(std::make_pair(spaceId, edgeIt.get_edge_name()), edgeIt.get_edge_type()); edgeTypeNameMap.emplace(std::make_pair(spaceId, edgeIt.get_edge_type()), @@ -437,11 +447,20 @@ bool MetaClient::loadSchemas(GraphSpaceID spaceId, << " Successfully!"; } - spaceInfoCache->tagSchemas_ = std::move(tagSchemas); - spaceInfoCache->edgeSchemas_ = std::move(edgeSchemas); return true; } +static Indexes buildIndexes(std::vector indexItemVec) { + Indexes indexes; + for (auto index : indexItemVec) { + auto indexName = index.get_index_name(); + auto indexID = index.get_index_id(); + auto indexPtr = std::make_shared(index); + indexes.emplace(indexID, indexPtr); + } + return indexes; +} + bool MetaClient::loadIndexes(GraphSpaceID spaceId, std::shared_ptr cache) { auto tagIndexesRet = listTagIndexes(spaceId).get(); if (!tagIndexesRet.ok()) { @@ -457,27 +476,25 @@ bool MetaClient::loadIndexes(GraphSpaceID spaceId, std::shared_ptrtagIndexItemVec_ = tagIndexItemVec; + cache->tagIndexes_ = buildIndexes(tagIndexItemVec); + for (const auto& tagIndex : tagIndexItemVec) { auto indexName = tagIndex.get_index_name(); auto indexID = tagIndex.get_index_id(); std::pair pair(spaceId, indexName); tagNameIndexMap_[pair] = indexID; - auto tagIndexPtr = std::make_shared(tagIndex); - tagIndexes.emplace(indexID, tagIndexPtr); } - cache->tagIndexes_ = std::move(tagIndexes); - Indexes edgeIndexes; - for (auto& edgeIndex : edgeIndexesRet.value()) { + auto edgeIndexItemVec = edgeIndexesRet.value(); + cache->edgeIndexItemVec_ = edgeIndexItemVec; + 
cache->edgeIndexes_ = buildIndexes(edgeIndexItemVec); + for (auto& edgeIndex : edgeIndexItemVec) { auto indexName = edgeIndex.get_index_name(); auto indexID = edgeIndex.get_index_id(); std::pair pair(spaceId, indexName); edgeNameIndexMap_[pair] = indexID; - auto edgeIndexPtr = std::make_shared(edgeIndex); - edgeIndexes.emplace(indexID, edgeIndexPtr); } - cache->edgeIndexes_ = std::move(edgeIndexes); return true; } @@ -522,10 +539,46 @@ bool MetaClient::loadFulltextIndexes() { return true; } -Status MetaClient::checkTagIndexed(GraphSpaceID space, IndexID indexID) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(space); - if (it != localCache_.end()) { +const MetaClient::ThreadLocalInfo& MetaClient::getThreadLocalInfo() { + ThreadLocalInfo& threadLocalInfo = folly::SingletonThreadLocal::get(); + + if (threadLocalInfo.localLastUpdateTime_ < localDataLastUpdateTime_) { + threadLocalInfo.localLastUpdateTime_ = localDataLastUpdateTime_; + + folly::RWSpinLock::ReadHolder holder(localCacheLock_); + for (auto& spaceInfo : localCache_) { + GraphSpaceID spaceId = spaceInfo.first; + std::shared_ptr info = spaceInfo.second; + std::shared_ptr infoDeepCopy = std::make_shared(*info); + infoDeepCopy->tagSchemas_ = buildTagSchemas(infoDeepCopy->tagItemVec_, &infoDeepCopy->pool_); + infoDeepCopy->edgeSchemas_ = + buildEdgeSchemas(infoDeepCopy->edgeItemVec_, &infoDeepCopy->pool_); + infoDeepCopy->tagIndexes_ = buildIndexes(infoDeepCopy->tagIndexItemVec_); + infoDeepCopy->edgeIndexes_ = buildIndexes(infoDeepCopy->edgeIndexItemVec_); + threadLocalInfo.localCache_[spaceId] = infoDeepCopy; + } + threadLocalInfo.spaceIndexByName_ = spaceIndexByName_; + threadLocalInfo.spaceTagIndexByName_ = spaceTagIndexByName_; + threadLocalInfo.spaceEdgeIndexByName_ = spaceEdgeIndexByName_; + threadLocalInfo.spaceEdgeIndexByType_ = spaceEdgeIndexByType_; + threadLocalInfo.spaceNewestTagVerMap_ = spaceNewestTagVerMap_; + threadLocalInfo.spaceNewestEdgeVerMap_ = 
spaceNewestEdgeVerMap_; + threadLocalInfo.spaceTagIndexById_ = spaceTagIndexById_; + threadLocalInfo.spaceAllEdgeMap_ = spaceAllEdgeMap_; + + threadLocalInfo.userRolesMap_ = userRolesMap_; + threadLocalInfo.storageHosts_ = storageHosts_; + threadLocalInfo.fulltextIndexMap_ = fulltextIndexMap_; + threadLocalInfo.userPasswordMap_ = userPasswordMap_; + } + + return threadLocalInfo; +} + +Status MetaClient::checkTagIndexed(GraphSpaceID spaceId, IndexID indexID) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(spaceId); + if (it != threadLocalInfo.localCache_.end()) { auto indexIt = it->second->tagIndexes_.find(indexID); if (indexIt != it->second->tagIndexes_.end()) { return Status::OK(); @@ -537,9 +590,9 @@ Status MetaClient::checkTagIndexed(GraphSpaceID space, IndexID indexID) { } Status MetaClient::checkEdgeIndexed(GraphSpaceID space, IndexID indexID) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(space); - if (it != localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(space); + if (it != threadLocalInfo.localCache_.end()) { auto indexIt = it->second->edgeIndexes_.find(indexID); if (indexIt != it->second->edgeIndexes_.end()) { return Status::OK(); @@ -830,9 +883,9 @@ Status MetaClient::handleResponse(const RESP& resp) { PartsMap MetaClient::doGetPartsMap(const HostAddr& host, const LocalCache& localCache) { PartsMap partMap; - for (auto it = localCache.begin(); it != localCache.end(); it++) { - auto spaceId = it->first; - auto& cache = it->second; + for (const auto& it : localCache) { + auto spaceId = it.first; + auto& cache = it.second; auto partsIt = cache->partsOnHost_.find(host); if (partsIt != cache->partsOnHost_.end()) { for (auto& partId : partsIt->second) { @@ -857,28 +910,28 @@ void MetaClient::diff(const LocalCache& oldCache, const LocalCache& newCache) { auto newPartsMap = 
doGetPartsMap(options_.localHost_, newCache); auto oldPartsMap = doGetPartsMap(options_.localHost_, oldCache); VLOG(1) << "Let's check if any new parts added/updated for " << options_.localHost_; - for (auto it = newPartsMap.begin(); it != newPartsMap.end(); it++) { - auto spaceId = it->first; - const auto& newParts = it->second; + for (auto& it : newPartsMap) { + auto spaceId = it.first; + const auto& newParts = it.second; auto oldIt = oldPartsMap.find(spaceId); if (oldIt == oldPartsMap.end()) { VLOG(1) << "SpaceId " << spaceId << " was added!"; listener_->onSpaceAdded(spaceId); - for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) { - listener_->onPartAdded(partIt->second); + for (const auto& newPart : newParts) { + listener_->onPartAdded(newPart.second); } } else { const auto& oldParts = oldIt->second; - for (auto partIt = newParts.begin(); partIt != newParts.end(); partIt++) { - auto oldPartIt = oldParts.find(partIt->first); + for (const auto& newPart : newParts) { + auto oldPartIt = oldParts.find(newPart.first); if (oldPartIt == oldParts.end()) { - VLOG(1) << "SpaceId " << spaceId << ", partId " << partIt->first << " was added!"; - listener_->onPartAdded(partIt->second); + VLOG(1) << "SpaceId " << spaceId << ", partId " << newPart.first << " was added!"; + listener_->onPartAdded(newPart.second); } else { const auto& oldPartHosts = oldPartIt->second; - const auto& newPartHosts = partIt->second; + const auto& newPartHosts = newPart.second; if (oldPartHosts != newPartHosts) { - VLOG(1) << "SpaceId " << spaceId << ", partId " << partIt->first << " was updated!"; + VLOG(1) << "SpaceId " << spaceId << ", partId " << newPart.first << " was updated!"; listener_->onPartUpdated(newPartHosts); } } @@ -886,23 +939,23 @@ void MetaClient::diff(const LocalCache& oldCache, const LocalCache& newCache) { } } VLOG(1) << "Let's check if any old parts removed...."; - for (auto it = oldPartsMap.begin(); it != oldPartsMap.end(); it++) { - auto spaceId = 
it->first; - const auto& oldParts = it->second; + for (auto& it : oldPartsMap) { + auto spaceId = it.first; + const auto& oldParts = it.second; auto newIt = newPartsMap.find(spaceId); if (newIt == newPartsMap.end()) { VLOG(1) << "SpaceId " << spaceId << " was removed!"; - for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) { - listener_->onPartRemoved(spaceId, partIt->first); + for (const auto& oldPart : oldParts) { + listener_->onPartRemoved(spaceId, oldPart.first); } listener_->onSpaceRemoved(spaceId); } else { const auto& newParts = newIt->second; - for (auto partIt = oldParts.begin(); partIt != oldParts.end(); partIt++) { - auto newPartIt = newParts.find(partIt->first); + for (const auto& oldPart : oldParts) { + auto newPartIt = newParts.find(oldPart.first); if (newPartIt == newParts.end()) { - VLOG(1) << "SpaceId " << spaceId << ", partId " << partIt->first << " was removed!"; - listener_->onPartRemoved(spaceId, partIt->first); + VLOG(1) << "SpaceId " << spaceId << ", partId " << oldPart.first << " was removed!"; + listener_->onPartRemoved(spaceId, oldPart.first); } } } @@ -1177,8 +1230,8 @@ MetaClient::getPartsAlloc(GraphSpaceID spaceId, PartTerms* partTerms) { [](auto client, auto request) { return client->future_getPartsAlloc(request); }, [=](cpp2::GetPartsAllocResp&& resp) -> decltype(auto) { std::unordered_map> parts; - for (auto it = resp.get_parts().begin(); it != resp.get_parts().end(); it++) { - parts.emplace(it->first, it->second); + for (const auto& it : resp.get_parts()) { + parts.emplace(it.first, it.second); } if (partTerms && resp.terms_ref().has_value()) { for (auto& termOfPart : resp.terms_ref().value()) { @@ -1195,9 +1248,9 @@ StatusOr MetaClient::getSpaceIdByNameFromCache(const std::string& if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceIndexByName_.find(name); - if (it != spaceIndexByName_.end()) { + const ThreadLocalInfo& threadLocalInfo = 
getThreadLocalInfo(); + auto it = threadLocalInfo.spaceIndexByName_.find(name); + if (it != threadLocalInfo.spaceIndexByName_.end()) { return it->second; } return Status::SpaceNotFound(); @@ -1207,9 +1260,9 @@ StatusOr MetaClient::getSpaceNameByIdFromCache(GraphSpaceID spaceId if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { LOG(ERROR) << "Space " << spaceId << " not found!"; return Status::Error("Space %d not found", spaceId); } @@ -1221,9 +1274,9 @@ StatusOr MetaClient::getTagIDByNameFromCache(const GraphSpaceID& space, if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceTagIndexByName_.find(std::make_pair(space, name)); - if (it == spaceTagIndexByName_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceTagIndexByName_.find(std::make_pair(space, name)); + if (it == threadLocalInfo.spaceTagIndexByName_.end()) { return Status::Error("TagName `%s' is nonexistent", name.c_str()); } return it->second; @@ -1234,9 +1287,9 @@ StatusOr MetaClient::getTagNameByIdFromCache(const GraphSpaceID& sp if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceTagIndexById_.find(std::make_pair(space, tagId)); - if (it == spaceTagIndexById_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceTagIndexById_.find(std::make_pair(space, tagId)); + if (it == threadLocalInfo.spaceTagIndexById_.end()) { return Status::Error("TagID `%d' is nonexistent", tagId); } return it->second; @@ -1247,9 +1300,9 @@ StatusOr 
MetaClient::getEdgeTypeByNameFromCache(const GraphSpaceID& sp if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceEdgeIndexByName_.find(std::make_pair(space, name)); - if (it == spaceEdgeIndexByName_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceEdgeIndexByName_.find(std::make_pair(space, name)); + if (it == threadLocalInfo.spaceEdgeIndexByName_.end()) { return Status::Error("EdgeName `%s' is nonexistent", name.c_str()); } return it->second; @@ -1260,9 +1313,9 @@ StatusOr MetaClient::getEdgeNameByTypeFromCache(const GraphSpaceID& if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceEdgeIndexByType_.find(std::make_pair(space, edgeType)); - if (it == spaceEdgeIndexByType_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceEdgeIndexByType_.find(std::make_pair(space, edgeType)); + if (it == threadLocalInfo.spaceEdgeIndexByType_.end()) { return Status::Error("EdgeType `%d' is nonexistent", edgeType); } return it->second; @@ -1272,9 +1325,9 @@ StatusOr> MetaClient::getAllEdgeFromCache(const GraphSp if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceAllEdgeMap_.find(space); - if (it == spaceAllEdgeMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceAllEdgeMap_.find(space); + if (it == threadLocalInfo.spaceAllEdgeMap_.end()) { return Status::Error("SpaceId `%d' is nonexistent", space); } return it->second; @@ -1407,14 +1460,14 @@ folly::Future> MetaClient::removeRange(std::string segment, } PartsMap MetaClient::getPartsMapFromCache(const HostAddr& host) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - return doGetPartsMap(host, localCache_); + const ThreadLocalInfo& 
threadLocalInfo = getThreadLocalInfo(); + return doGetPartsMap(host, threadLocalInfo.localCache_); } StatusOr MetaClient::getPartHostsFromCache(GraphSpaceID spaceId, PartitionID partId) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(spaceId); - if (it == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(spaceId); + if (it == threadLocalInfo.localCache_.end()) { return Status::Error("Space not found, spaceid: %d", spaceId); } auto& cache = it->second; @@ -1432,9 +1485,9 @@ StatusOr MetaClient::getPartHostsFromCache(GraphSpaceID spaceId, Part Status MetaClient::checkPartExistInCache(const HostAddr& host, GraphSpaceID spaceId, PartitionID partId) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(spaceId); - if (it != localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(spaceId); + if (it != threadLocalInfo.localCache_.end()) { auto partsIt = it->second->partsOnHost_.find(host); if (partsIt != it->second->partsOnHost_.end()) { for (auto& pId : partsIt->second) { @@ -1451,9 +1504,9 @@ Status MetaClient::checkPartExistInCache(const HostAddr& host, } Status MetaClient::checkSpaceExistInCache(const HostAddr& host, GraphSpaceID spaceId) { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(spaceId); - if (it != localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(spaceId); + if (it != threadLocalInfo.localCache_.end()) { auto partsIt = it->second->partsOnHost_.find(host); if (partsIt != it->second->partsOnHost_.end() && !partsIt->second.empty()) { return Status::OK(); @@ -1464,10 +1517,10 @@ Status MetaClient::checkSpaceExistInCache(const HostAddr& host, GraphSpaceID spa return Status::SpaceNotFound(); } -StatusOr 
MetaClient::partsNum(GraphSpaceID spaceId) const { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = localCache_.find(spaceId); - if (it == localCache_.end()) { +StatusOr MetaClient::partsNum(GraphSpaceID spaceId) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.localCache_.find(spaceId); + if (it == threadLocalInfo.localCache_.end()) { return Status::Error("Space not found, spaceid: %d", spaceId); } return it->second->partsAlloc_.size(); @@ -1850,9 +1903,9 @@ StatusOr MetaClient::getSpaceVidLen(const GraphSpaceID& spaceId) { if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { LOG(ERROR) << "Space " << spaceId << " not found!"; return Status::Error("Space %d not found", spaceId); } @@ -1868,9 +1921,9 @@ StatusOr MetaClient::getSpaceVidType(const GraphSpac if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { LOG(ERROR) << "Space " << spaceId << " not found!"; return Status::Error("Space %d not found", spaceId); } @@ -1889,9 +1942,9 @@ StatusOr MetaClient::getSpaceDesc(const GraphSpaceID& space) { if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(space); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = 
threadLocalInfo.localCache_.find(space); + if (spaceIt == threadLocalInfo.localCache_.end()) { LOG(ERROR) << "Space " << space << " not found!"; return Status::Error("Space %d not found", space); } @@ -1912,9 +1965,9 @@ StatusOr> MetaClient::getTagSchemaFr if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt != localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt != threadLocalInfo.localCache_.end()) { auto tagIt = spaceIt->second->tagSchemas_.find(tagID); if (tagIt != spaceIt->second->tagSchemas_.end() && !tagIt->second.empty()) { size_t vNum = tagIt->second.size(); @@ -1932,9 +1985,9 @@ StatusOr> MetaClient::getEdgeSchemaF if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt != localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt != threadLocalInfo.localCache_.end()) { auto edgeIt = spaceIt->second->edgeSchemas_.find(edgeType); if (edgeIt != spaceIt->second->edgeSchemas_.end() && !edgeIt->second.empty()) { size_t vNum = edgeIt->second.size(); @@ -1951,9 +2004,9 @@ StatusOr MetaClient::getAllVerTagSchema(GraphSpaceID spaceId) { if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = localCache_.find(spaceId); - if (iter == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.localCache_.find(spaceId); + if (iter == threadLocalInfo.localCache_.end()) { return Status::Error("Space %d not found", spaceId); } return iter->second->tagSchemas_; @@ -1963,9 +2016,9 @@ StatusOr 
MetaClient::getAllLatestVerTagSchema(const GraphSpaceID& spa if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = localCache_.find(spaceId); - if (iter == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.localCache_.find(spaceId); + if (iter == threadLocalInfo.localCache_.end()) { return Status::Error("Space %d not found", spaceId); } TagSchema tagsSchema; @@ -1981,9 +2034,9 @@ StatusOr MetaClient::getAllVerEdgeSchema(GraphSpaceID spaceId) { if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = localCache_.find(spaceId); - if (iter == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.localCache_.find(spaceId); + if (iter == threadLocalInfo.localCache_.end()) { return Status::Error("Space %d not found", spaceId); } return iter->second->edgeSchemas_; @@ -1993,9 +2046,9 @@ StatusOr MetaClient::getAllLatestVerEdgeSchemaFromCache(const GraphS if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = localCache_.find(spaceId); - if (iter == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.localCache_.find(spaceId); + if (iter == threadLocalInfo.localCache_.end()) { return Status::Error("Space %d not found", spaceId); } EdgeSchema edgesSchema; @@ -2083,9 +2136,9 @@ StatusOr> MetaClient::getTagIndexFromCache(Grap return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << 
"Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } else { @@ -2120,9 +2173,9 @@ StatusOr> MetaClient::getEdgeIndexFromCache(Gra return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } else { @@ -2157,9 +2210,9 @@ StatusOr>> MetaClient::getTagIndexe return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } else { @@ -2180,9 +2233,9 @@ StatusOr>> MetaClient::getEdgeIndex return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } else { @@ -2250,46 +2303,46 @@ StatusOr MetaClient::getLeaderInfo() { const std::vector& MetaClient::getAddresses() { return addrs_; } -std::vector MetaClient::getRolesByUserFromCache(const std::string& user) const { +std::vector MetaClient::getRolesByUserFromCache(const std::string& user) { if (!ready_) { return std::vector(0); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = 
userRolesMap_.find(user); - if (iter == userRolesMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.userRolesMap_.find(user); + if (iter == threadLocalInfo.userRolesMap_.end()) { return std::vector(0); } return iter->second; } -bool MetaClient::authCheckFromCache(const std::string& account, const std::string& password) const { +bool MetaClient::authCheckFromCache(const std::string& account, const std::string& password) { if (!ready_) { return false; } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = userPasswordMap_.find(account); - if (iter == userPasswordMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.userPasswordMap_.find(account); + if (iter == threadLocalInfo.userPasswordMap_.end()) { return false; } return iter->second == password; } -bool MetaClient::checkShadowAccountFromCache(const std::string& account) const { +bool MetaClient::checkShadowAccountFromCache(const std::string& account) { if (!ready_) { return false; } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto iter = userPasswordMap_.find(account); - if (iter != userPasswordMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto iter = threadLocalInfo.userPasswordMap_.find(account); + if (iter != threadLocalInfo.userPasswordMap_.end()) { return true; } return false; } -StatusOr MetaClient::getTermFromCache(GraphSpaceID spaceId, PartitionID partId) const { - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceInfo = localCache_.find(spaceId); - if (spaceInfo == localCache_.end()) { +StatusOr MetaClient::getTermFromCache(GraphSpaceID spaceId, PartitionID partId) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceInfo = threadLocalInfo.localCache_.find(spaceId); + if (spaceInfo == threadLocalInfo.localCache_.end()) { return Status::Error("Term not found!"); } @@ -2301,13 +2354,13 
@@ StatusOr MetaClient::getTermFromCache(GraphSpaceID spaceId, PartitionID return termInfo->second; } -StatusOr> MetaClient::getStorageHosts() const { +StatusOr> MetaClient::getStorageHosts() { if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - return storageHosts_; + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + return threadLocalInfo.storageHosts_; } StatusOr MetaClient::getLatestTagVersionFromCache(const GraphSpaceID& space, @@ -2315,9 +2368,9 @@ StatusOr MetaClient::getLatestTagVersionFromCache(const GraphSpaceID& if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceNewestTagVerMap_.find(std::make_pair(space, tagId)); - if (it == spaceNewestTagVerMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceNewestTagVerMap_.find(std::make_pair(space, tagId)); + if (it == threadLocalInfo.spaceNewestTagVerMap_.end()) { return Status::TagNotFound(); } return it->second; @@ -2328,9 +2381,9 @@ StatusOr MetaClient::getLatestEdgeVersionFromCache(const GraphSpaceID if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto it = spaceNewestEdgeVerMap_.find(std::make_pair(space, edgeType)); - if (it == spaceNewestEdgeVerMap_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto it = threadLocalInfo.spaceNewestEdgeVerMap_.find(std::make_pair(space, edgeType)); + if (it == threadLocalInfo.spaceNewestEdgeVerMap_.end()) { return Status::EdgeNotFound(); } return it->second; @@ -2801,9 +2854,9 @@ MetaClient::getListenersBySpaceHostFromCache(GraphSpaceID spaceId, const HostAdd if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& 
threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } @@ -2820,8 +2873,8 @@ StatusOr MetaClient::getListenersByHostFromCache(const HostAddr& h if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - return doGetListenersMap(host, localCache_); + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + return doGetListenersMap(host, threadLocalInfo.localCache_); } ListenersMap MetaClient::doGetListenersMap(const HostAddr& host, const LocalCache& localCache) { @@ -2857,9 +2910,9 @@ StatusOr MetaClient::getListenerHostsBySpacePartType(GraphSpaceID spac if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } @@ -2878,9 +2931,9 @@ StatusOr> MetaClient::getListenerHostTypeBySpace if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - auto spaceIt = localCache_.find(spaceId); - if (spaceIt == localCache_.end()) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + auto spaceIt = threadLocalInfo.localCache_.find(spaceId); + if (spaceIt == threadLocalInfo.localCache_.end()) { VLOG(3) << "Space " << spaceId << " not found!"; return Status::SpaceNotFound(); } @@ -2899,6 +2952,9 @@ StatusOr> MetaClient::getListenerHostTypeBySpace } bool MetaClient::loadCfg() { + if (options_.skipConfig_ || localCfgLastUpdateTime_ == metadLastUpdateTime_) { + return true; + } if 
(!configReady_ && !registerCfg()) { return false; } @@ -2929,6 +2985,7 @@ bool MetaClient::loadCfg() { LOG(ERROR) << "Load configs failed: " << ret.status(); return false; } + localCfgLastUpdateTime_.store(metadLastUpdateTime_.load()); return true; } @@ -2936,7 +2993,7 @@ void MetaClient::updateGflagsValue(const cpp2::ConfigItem& item) { if (item.get_mode() != cpp2::ConfigMode::MUTABLE) { return; } - auto value = item.get_value(); + const auto& value = item.get_value(); std::string curValue; if (!gflags::GetCommandLineOption(item.get_name().c_str(), &curValue)) { return; @@ -2965,8 +3022,8 @@ void MetaClient::updateNestedGflags(const std::unordered_map optionMap.emplace(value.first, value.second.toString()); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - for (const auto& spaceEntry : localCache_) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + for (const auto& spaceEntry : threadLocalInfo.localCache_) { listener_->onSpaceOptionUpdated(spaceEntry.first, optionMap); } } @@ -3352,8 +3409,8 @@ StatusOr> MetaClient::getFTIndexe if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - return fulltextIndexMap_; + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + return threadLocalInfo.fulltextIndexMap_; } StatusOr> MetaClient::getFTIndexBySpaceFromCache( @@ -3361,11 +3418,11 @@ StatusOr> MetaClient::getFTIndexB if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); std::unordered_map indexes; - for (auto it = fulltextIndexMap_.begin(); it != fulltextIndexMap_.end(); ++it) { - if (it->second.get_space_id() == spaceId) { - indexes[it->first] = it->second; + for (const auto& it : threadLocalInfo.fulltextIndexMap_) { + if (it.second.get_space_id() == spaceId) { + indexes[it.first] = it.second; } } return indexes; @@ -3376,13 +3433,13 @@ StatusOr> 
MetaClient::getFTIndexBySpaceSch if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - for (auto it = fulltextIndexMap_.begin(); it != fulltextIndexMap_.end(); ++it) { - auto id = it->second.get_depend_schema().getType() == nebula::cpp2::SchemaID::Type::edge_type - ? it->second.get_depend_schema().get_edge_type() - : it->second.get_depend_schema().get_tag_id(); - if (it->second.get_space_id() == spaceId && id == schemaId) { - return std::make_pair(it->first, it->second); + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + for (auto& it : threadLocalInfo.fulltextIndexMap_) { + auto id = it.second.get_depend_schema().getType() == nebula::cpp2::SchemaID::Type::edge_type + ? it.second.get_depend_schema().get_edge_type() + : it.second.get_depend_schema().get_tag_id(); + if (it.second.get_space_id() == spaceId && id == schemaId) { + return std::make_pair(it.first, it.second); } } return Status::IndexNotFound(); @@ -3393,12 +3450,12 @@ StatusOr MetaClient::getFTIndexByNameFromCache(GraphSpaceID space if (!ready_) { return Status::Error("Not ready!"); } - folly::RWSpinLock::ReadHolder holder(localCacheLock_); - if (fulltextIndexMap_.find(name) != fulltextIndexMap_.end() && - fulltextIndexMap_[name].get_space_id() != spaceId) { + const ThreadLocalInfo& threadLocalInfo = getThreadLocalInfo(); + if (threadLocalInfo.fulltextIndexMap_.find(name) != fulltextIndexMap_.end() && + threadLocalInfo.fulltextIndexMap_.at(name).get_space_id() != spaceId) { return Status::IndexNotFound(); } - return fulltextIndexMap_[name]; + return threadLocalInfo.fulltextIndexMap_.at(name); } folly::Future> MetaClient::createSession( diff --git a/src/clients/meta/MetaClient.h b/src/clients/meta/MetaClient.h index 6605404ca53..445cabdebf0 100644 --- a/src/clients/meta/MetaClient.h +++ b/src/clients/meta/MetaClient.h @@ -16,6 +16,7 @@ #include #include "common/base/Base.h" +#include "common/base/ObjectPool.h" #include 
"common/base/Status.h" #include "common/base/StatusOr.h" #include "common/meta/Common.h" @@ -74,14 +75,36 @@ struct SpaceInfoCache { cpp2::SpaceDesc spaceDesc_; PartsAlloc partsAlloc_; std::unordered_map> partsOnHost_; + std::vector tagItemVec_; TagSchemas tagSchemas_; + std::vector edgeItemVec_; EdgeSchemas edgeSchemas_; + std::vector tagIndexItemVec_; Indexes tagIndexes_; + std::vector edgeIndexItemVec_; Indexes edgeIndexes_; Listeners listeners_; // objPool used to decode when adding field ObjectPool pool_; std::unordered_map termOfPartition_; + + SpaceInfoCache() = default; + SpaceInfoCache(const SpaceInfoCache& info) + : spaceDesc_(info.spaceDesc_), + partsAlloc_(info.partsAlloc_), + partsOnHost_(info.partsOnHost_), + tagItemVec_(info.tagItemVec_), + tagSchemas_(info.tagSchemas_), + edgeItemVec_(info.edgeItemVec_), + edgeSchemas_(info.edgeSchemas_), + tagIndexItemVec_(info.tagIndexItemVec_), + tagIndexes_(info.tagIndexes_), + edgeIndexItemVec_(info.edgeIndexItemVec_), + edgeIndexes_(info.edgeIndexes_), + listeners_(info.listeners_), + termOfPartition_(info.termOfPartition_) {} + + ~SpaceInfoCache() = default; }; using LocalCache = std::unordered_map>; @@ -501,7 +524,7 @@ class MetaClient { Status checkSpaceExistInCache(const HostAddr& host, GraphSpaceID spaceId); - StatusOr partsNum(GraphSpaceID spaceId) const; + StatusOr partsNum(GraphSpaceID spaceId); PartitionID partId(int32_t numParts, VertexID id) const; @@ -559,15 +582,15 @@ class MetaClient { EdgeType edgeType, const std::string& field); - std::vector getRolesByUserFromCache(const std::string& user) const; + std::vector getRolesByUserFromCache(const std::string& user); - bool authCheckFromCache(const std::string& account, const std::string& password) const; + bool authCheckFromCache(const std::string& account, const std::string& password); - StatusOr getTermFromCache(GraphSpaceID spaceId, PartitionID) const; + StatusOr getTermFromCache(GraphSpaceID spaceId, PartitionID); - bool 
checkShadowAccountFromCache(const std::string& account) const; + bool checkShadowAccountFromCache(const std::string& account); - StatusOr> getStorageHosts() const; + StatusOr> getStorageHosts(); StatusOr getSessionFromCache(const nebula::SessionID& session_id); @@ -719,8 +742,10 @@ class MetaClient { // leaderIdsLock_ is used to protect leaderIds_ std::unordered_map> leaderIds_; folly::RWSpinLock leaderIdsLock_; - int64_t localLastUpdateTime_{0}; - int64_t metadLastUpdateTime_{0}; + std::atomic localDataLastUpdateTime_{-1}; + std::atomic localCfgLastUpdateTime_{-1}; + std::atomic metadLastUpdateTime_{0}; + int64_t metaServerVersion_{-1}; static constexpr int64_t EXPECT_META_VERSION = 2; @@ -736,6 +761,31 @@ class MetaClient { HostAddr leader_; HostAddr localHost_; + struct ThreadLocalInfo { + int64_t localLastUpdateTime_{-2}; + LocalCache localCache_; + SpaceNameIdMap spaceIndexByName_; + SpaceTagNameIdMap spaceTagIndexByName_; + SpaceEdgeNameTypeMap spaceEdgeIndexByName_; + SpaceEdgeTypeNameMap spaceEdgeIndexByType_; + SpaceTagIdNameMap spaceTagIndexById_; + SpaceNewestTagVerMap spaceNewestTagVerMap_; + SpaceNewestEdgeVerMap spaceNewestEdgeVerMap_; + SpaceAllEdgeMap spaceAllEdgeMap_; + + UserRolesMap userRolesMap_; + std::vector storageHosts_; + FTIndexMap fulltextIndexMap_; + UserPasswordMap userPasswordMap_; + }; + + const ThreadLocalInfo& getThreadLocalInfo(); + + void addSchemaField(NebulaSchemaProvider* schema, const cpp2::ColumnDef& col, ObjectPool* pool); + + TagSchemas buildTagSchemas(std::vector tagItemVec, ObjectPool* pool); + EdgeSchemas buildEdgeSchemas(std::vector edgeItemVec, ObjectPool* pool); + std::unique_ptr bgThread_; SpaceNameIdMap spaceIndexByName_; SpaceTagNameIdMap spaceTagIndexByName_; diff --git a/src/graph/service/CloudAuthenticator.cpp b/src/graph/service/CloudAuthenticator.cpp index d69ea54c656..31830779882 100644 --- a/src/graph/service/CloudAuthenticator.cpp +++ b/src/graph/service/CloudAuthenticator.cpp @@ -12,7 +12,7 @@ namespace 
nebula { namespace graph { -CloudAuthenticator::CloudAuthenticator(const meta::MetaClient* client) { metaClient_ = client; } +CloudAuthenticator::CloudAuthenticator(meta::MetaClient* client) { metaClient_ = client; } bool CloudAuthenticator::auth(const std::string& user, const std::string& password) { // The shadow account on the nebula side has been created diff --git a/src/graph/service/CloudAuthenticator.h b/src/graph/service/CloudAuthenticator.h index 0b54d0d39eb..04e37b5ebde 100644 --- a/src/graph/service/CloudAuthenticator.h +++ b/src/graph/service/CloudAuthenticator.h @@ -15,12 +15,12 @@ namespace graph { class CloudAuthenticator final : public Authenticator { public: - explicit CloudAuthenticator(const meta::MetaClient* client); + explicit CloudAuthenticator(meta::MetaClient* client); bool auth(const std::string& user, const std::string& password) override; private: - const meta::MetaClient* metaClient_; + meta::MetaClient* metaClient_; }; } // namespace graph diff --git a/src/graph/service/PasswordAuthenticator.cpp b/src/graph/service/PasswordAuthenticator.cpp index 4326c7d5b74..833200dec53 100644 --- a/src/graph/service/PasswordAuthenticator.cpp +++ b/src/graph/service/PasswordAuthenticator.cpp @@ -8,9 +8,7 @@ namespace nebula { namespace graph { -PasswordAuthenticator::PasswordAuthenticator(const meta::MetaClient* client) { - metaClient_ = client; -} +PasswordAuthenticator::PasswordAuthenticator(meta::MetaClient* client) { metaClient_ = client; } bool PasswordAuthenticator::auth(const std::string& user, const std::string& password) { return metaClient_->authCheckFromCache(user, password); diff --git a/src/graph/service/PasswordAuthenticator.h b/src/graph/service/PasswordAuthenticator.h index 344ca2415af..3a8bbf5abd8 100644 --- a/src/graph/service/PasswordAuthenticator.h +++ b/src/graph/service/PasswordAuthenticator.h @@ -14,12 +14,12 @@ namespace graph { class PasswordAuthenticator final : public Authenticator { public: - explicit 
PasswordAuthenticator(const meta::MetaClient* client); + explicit PasswordAuthenticator(meta::MetaClient* client); bool auth(const std::string& user, const std::string& password) override; private: - const meta::MetaClient* metaClient_; + meta::MetaClient* metaClient_; }; } // namespace graph diff --git a/src/graph/service/QueryEngine.h b/src/graph/service/QueryEngine.h index 505a844373d..a0b777bc35b 100644 --- a/src/graph/service/QueryEngine.h +++ b/src/graph/service/QueryEngine.h @@ -38,7 +38,7 @@ class QueryEngine final : public cpp::NonCopyable, public cpp::NonMovable { using RequestContextPtr = std::unique_ptr>; void execute(RequestContextPtr rctx); - const meta::MetaClient* metaClient() const { return metaClient_; } + meta::MetaClient* metaClient() { return metaClient_; } private: Status setupMemoryMonitorThread(); From 289aadb673e657843dc7d1979632359b712c9433 Mon Sep 17 00:00:00 2001 From: "jie.wang" <38901892+jievince@users.noreply.github.com> Date: Mon, 15 Nov 2021 10:50:58 +0800 Subject: [PATCH 14/53] fix typo error of KW_VALUE (#3303) --- src/parser/parser.yy | 3 +- src/parser/scanner.lex | 2 +- src/parser/test/ScannerTest.cpp | 4 +-- tests/tck/features/bugfix/TypoError.feature | 34 +++++++++++++++++++++ 4 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 tests/tck/features/bugfix/TypoError.feature diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 7b5259a7eaf..fce96a5aec6 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -162,7 +162,7 @@ static constexpr size_t kCommentLengthLimit = 256; %token KW_BOOL KW_INT8 KW_INT16 KW_INT32 KW_INT64 KW_INT KW_FLOAT KW_DOUBLE %token KW_STRING KW_FIXED_STRING KW_TIMESTAMP KW_DATE KW_TIME KW_DATETIME %token KW_GO KW_AS KW_TO KW_USE KW_SET KW_FROM KW_WHERE KW_ALTER -%token KW_MATCH KW_INSERT KW_VALUES KW_YIELD KW_RETURN KW_CREATE KW_VERTEX KW_VERTICES +%token KW_MATCH KW_INSERT KW_VALUE KW_VALUES KW_YIELD KW_RETURN KW_CREATE KW_VERTEX KW_VERTICES %token KW_EDGE KW_EDGES 
KW_STEPS KW_OVER KW_UPTO KW_REVERSELY KW_SPACE KW_DELETE KW_FIND %token KW_TAG KW_TAGS KW_UNION KW_INTERSECT KW_MINUS %token KW_NO KW_OVERWRITE KW_IN KW_DESCRIBE KW_DESC KW_SHOW KW_HOST KW_HOSTS KW_PART KW_PARTS KW_ADD @@ -433,6 +433,7 @@ legal_integer */ unreserved_keyword : KW_SPACE { $$ = new std::string("space"); } + | KW_VALUE { $$ = new std::string("value"); } | KW_VALUES { $$ = new std::string("values"); } | KW_HOST { $$ = new std::string("host"); } | KW_HOSTS { $$ = new std::string("hosts"); } diff --git a/src/parser/scanner.lex b/src/parser/scanner.lex index 6c316e4b2bf..2b209bee00d 100644 --- a/src/parser/scanner.lex +++ b/src/parser/scanner.lex @@ -157,7 +157,7 @@ IP_OCTET ([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]) "HOSTS" { return TokenType::KW_HOSTS; } "SPACE" { return TokenType::KW_SPACE; } "SPACES" { return TokenType::KW_SPACES; } -"VALUE" { return TokenType::KW_VALUES; } +"VALUE" { return TokenType::KW_VALUE; } "VALUES" { return TokenType::KW_VALUES; } "USER" { return TokenType::KW_USER; } "USERS" { return TokenType::KW_USERS; } diff --git a/src/parser/test/ScannerTest.cpp b/src/parser/test/ScannerTest.cpp index 9f1c155e868..d9e7009fdb4 100644 --- a/src/parser/test/ScannerTest.cpp +++ b/src/parser/test/ScannerTest.cpp @@ -181,8 +181,8 @@ TEST(Scanner, Basic) { CHECK_SEMANTIC_TYPE("match", TokenType::KW_MATCH), CHECK_SEMANTIC_TYPE("INSERT", TokenType::KW_INSERT), CHECK_SEMANTIC_TYPE("insert", TokenType::KW_INSERT), - CHECK_SEMANTIC_TYPE("VALUE", TokenType::KW_VALUES), - CHECK_SEMANTIC_TYPE("value", TokenType::KW_VALUES), + CHECK_SEMANTIC_TYPE("VALUE", TokenType::KW_VALUE), + CHECK_SEMANTIC_TYPE("value", TokenType::KW_VALUE), CHECK_SEMANTIC_TYPE("VALUES", TokenType::KW_VALUES), CHECK_SEMANTIC_TYPE("values", TokenType::KW_VALUES), CHECK_SEMANTIC_TYPE("YIELD", TokenType::KW_YIELD), diff --git a/tests/tck/features/bugfix/TypoError.feature b/tests/tck/features/bugfix/TypoError.feature new file mode 100644 index 00000000000..795b2b2dfc8 --- 
/dev/null +++ b/tests/tck/features/bugfix/TypoError.feature @@ -0,0 +1,34 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +Feature: Typo error + + # issue https://github.com/vesoft-inc/nebula/issues/2204 + Scenario: Typo error of KW_VALUE + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + When executing query: + """ + CREATE tag value(value int, values bool) + """ + Then the execution should be successful + When executing query: + """ + DESC TAG value; + """ + Then the result should be, in any order: + | Field | Type | Null | Default | Comment | + | "value" | "int64" | "YES" | EMPTY | EMPTY | + | "values" | "bool" | "YES" | EMPTY | EMPTY | + When executing query: + """ + SHOW CREATE TAG value + """ + Then the result should be, in any order, with relax comparison: + | Tag | Create Tag | + | "value" | 'CREATE TAG `value` (\n `value` int64 NULL,\n `values` bool NULL\n) ttl_duration = 0, ttl_col = ""' | From 026dea5bb002b356ce3a1d1530fcadb0f512c6a3 Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Mon, 15 Nov 2021 12:17:01 +0800 Subject: [PATCH 15/53] Merge all expression tests to save disk consumption (#3301) * Merge all expression tests You can use --gtest_filter to run your special tests * Restore expr mock obj * Fix compile error * fix compile --- .../test/AggregateExpressionTest.cpp | 8 - .../test/ArithmeticExpressionTest.cpp | 8 - .../test/AttributeExpressionTest.cpp | 8 - src/common/expression/test/CMakeLists.txt | 444 +----------------- .../expression/test/CaseExpressionTest.cpp | 8 - .../expression/test/ColumnExpressionTest.cpp | 8 - .../test/ConstantExpressionTest.cpp | 8 - .../test/ContainerExpressionTest.cpp | 8 - .../expression/test/EncodeDecodeTest.cpp | 8 - .../test/FunctionCallExpressionTest.cpp | 8 - 
.../expression/test/LabelExpressionTest.cpp | 8 - .../test/ListComprehensionExpressionTest.cpp | 8 - .../expression/test/LogicalExpressionTest.cpp | 8 - .../test/PathBuildExpressionTest.cpp | 8 - .../test/PredicateExpressionTest.cpp | 8 - .../test/PropertyExpressionTest.cpp | 8 - .../expression/test/ReduceExpressionTest.cpp | 8 - .../test/RelationalExpressionTest.cpp | 8 - .../test/SubscriptExpressionTest.cpp | 8 - src/common/expression/test/TestBase.cpp | 32 ++ src/common/expression/test/TestBase.h | 25 +- .../test/TypeCastingExpressionTest.cpp | 8 - .../expression/test/UnaryExpressionTest.cpp | 8 - .../test/VersionedVariableExpressionTest.cpp | 8 - 24 files changed, 60 insertions(+), 609 deletions(-) create mode 100644 src/common/expression/test/TestBase.cpp diff --git a/src/common/expression/test/AggregateExpressionTest.cpp b/src/common/expression/test/AggregateExpressionTest.cpp index 2f797face1e..9a7281549e1 100644 --- a/src/common/expression/test/AggregateExpressionTest.cpp +++ b/src/common/expression/test/AggregateExpressionTest.cpp @@ -350,11 +350,3 @@ TEST_F(ExpressionTest, AggregateToString) { } } // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ArithmeticExpressionTest.cpp b/src/common/expression/test/ArithmeticExpressionTest.cpp index e008f401c36..689f6de5392 100644 --- a/src/common/expression/test/ArithmeticExpressionTest.cpp +++ b/src/common/expression/test/ArithmeticExpressionTest.cpp @@ -119,11 +119,3 @@ TEST_F(ArithmeticExpressionTest, TestArithmeticExpression) { } } } // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/AttributeExpressionTest.cpp 
b/src/common/expression/test/AttributeExpressionTest.cpp index 40482ab98c8..5b3dbbe5870 100644 --- a/src/common/expression/test/AttributeExpressionTest.cpp +++ b/src/common/expression/test/AttributeExpressionTest.cpp @@ -174,11 +174,3 @@ TEST_F(AttributeExpressionTest, DateTimeAttribute) { } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/CMakeLists.txt b/src/common/expression/test/CMakeLists.txt index 6036f7679a3..a58a3341f66 100644 --- a/src/common/expression/test/CMakeLists.txt +++ b/src/common/expression/test/CMakeLists.txt @@ -49,13 +49,34 @@ set(expression_test_common_libs nebula_add_library( expr_ctx_mock_obj OBJECT ExpressionContextMock.cpp - ) + TestBase.cpp +) nebula_add_test( NAME expression_test SOURCES ExpressionTest.cpp + EncodeDecodeTest.cpp + AggregateExpressionTest.cpp + ArithmeticExpressionTest.cpp + AttributeExpressionTest.cpp + CaseExpressionTest.cpp + ColumnExpressionTest.cpp + ConstantExpressionTest.cpp + ContainerExpressionTest.cpp + FunctionCallExpressionTest.cpp + LabelExpressionTest.cpp + ListComprehensionExpressionTest.cpp + LogicalExpressionTest.cpp + RelationalExpressionTest.cpp + PathBuildExpressionTest.cpp + PropertyExpressionTest.cpp + PredicateExpressionTest.cpp + ReduceExpressionTest.cpp + SubscriptExpressionTest.cpp + TypeCastingExpressionTest.cpp + VersionedVariableExpressionTest.cpp OBJECTS $ $ @@ -141,424 +162,3 @@ nebula_add_executable( boost_regex ${THRIFT_LIBRARIES} ) - - -nebula_add_test( - NAME expression_encode_decode_test - SOURCES EncodeDecodeTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME aggregate_expression_test - SOURCES AggregateExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - 
$ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME arithmetic_expression_test - SOURCES ArithmeticExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME attribute_expression_test - SOURCES AttributeExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME case_expression_test - SOURCES CaseExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME column_expression_test - SOURCES ColumnExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME constant_expression_test - SOURCES ConstantExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME container_expression_test - SOURCES ContainerExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME function_call_expression_test - SOURCES FunctionCallExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME label_expression_test - SOURCES LabelExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - 
${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME list_comprehension_expression_test - SOURCES ListComprehensionExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME logical_expression_test - SOURCES LogicalExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME relational_expression_test - SOURCES RelationalExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME path_build_expression_test - SOURCES PathBuildExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME property_expression_test - SOURCES PropertyExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME predicate_expression_test - SOURCES PredicateExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME reduce_expression_test - SOURCES ReduceExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME subscript_expression_test - SOURCES SubscriptExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) 
- -nebula_add_test( - NAME type_casting_expression_test - SOURCES TypeCastingExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) - -nebula_add_test( - NAME versioned_variable_expression_test - SOURCES VersionedVariableExpressionTest.cpp - OBJECTS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - ${expression_test_common_libs} - LIBRARIES - gtest - ${THRIFT_LIBRARIES} - ${PROXYGEN_LIBRARIES} -) diff --git a/src/common/expression/test/CaseExpressionTest.cpp b/src/common/expression/test/CaseExpressionTest.cpp index 7d318bf355c..2a0f974f232 100644 --- a/src/common/expression/test/CaseExpressionTest.cpp +++ b/src/common/expression/test/CaseExpressionTest.cpp @@ -234,11 +234,3 @@ TEST_F(CaseExpressionTest, CaseEvaluate) { } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ColumnExpressionTest.cpp b/src/common/expression/test/ColumnExpressionTest.cpp index 18f45306246..754e99cb83a 100644 --- a/src/common/expression/test/ColumnExpressionTest.cpp +++ b/src/common/expression/test/ColumnExpressionTest.cpp @@ -56,11 +56,3 @@ TEST_F(ExpressionTest, ColumnExpression) { } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ConstantExpressionTest.cpp b/src/common/expression/test/ConstantExpressionTest.cpp index 912a983bcb5..55eecb27e6d 100644 --- a/src/common/expression/test/ConstantExpressionTest.cpp +++ b/src/common/expression/test/ConstantExpressionTest.cpp @@ -103,11 +103,3 @@ TEST_F(ExpressionTest, Constant) { } } // namespace nebula - -int main(int argc, char **argv) { - 
testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ContainerExpressionTest.cpp b/src/common/expression/test/ContainerExpressionTest.cpp index ec1f4e10f97..30845ac2750 100644 --- a/src/common/expression/test/ContainerExpressionTest.cpp +++ b/src/common/expression/test/ContainerExpressionTest.cpp @@ -102,11 +102,3 @@ TEST_F(ExpressionTest, MapEvaluate) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/EncodeDecodeTest.cpp b/src/common/expression/test/EncodeDecodeTest.cpp index 8b68f75492c..090e63594ce 100644 --- a/src/common/expression/test/EncodeDecodeTest.cpp +++ b/src/common/expression/test/EncodeDecodeTest.cpp @@ -523,11 +523,3 @@ TEST(ExpressionEncodeDecode, ListComprehensionExpression) { } } // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/FunctionCallExpressionTest.cpp b/src/common/expression/test/FunctionCallExpressionTest.cpp index e47dfe3edab..160998f8182 100644 --- a/src/common/expression/test/FunctionCallExpressionTest.cpp +++ b/src/common/expression/test/FunctionCallExpressionTest.cpp @@ -130,11 +130,3 @@ TEST_F(FunctionCallExpressionTest, FunctionCallToStringTest) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/LabelExpressionTest.cpp b/src/common/expression/test/LabelExpressionTest.cpp index e6f3819696a..63a4b36f2e5 100644 --- 
a/src/common/expression/test/LabelExpressionTest.cpp +++ b/src/common/expression/test/LabelExpressionTest.cpp @@ -20,11 +20,3 @@ TEST_F(LabelExpressionTest, LabelEvaluate) { ASSERT_EQ("name", value.getStr()); } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ListComprehensionExpressionTest.cpp b/src/common/expression/test/ListComprehensionExpressionTest.cpp index 8580b375ebb..dbb88ae7ab8 100644 --- a/src/common/expression/test/ListComprehensionExpressionTest.cpp +++ b/src/common/expression/test/ListComprehensionExpressionTest.cpp @@ -118,11 +118,3 @@ TEST_F(ListComprehensionExpressionTest, ListComprehensionExprToString) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/LogicalExpressionTest.cpp b/src/common/expression/test/LogicalExpressionTest.cpp index 4c17c416877..304fa17ea01 100644 --- a/src/common/expression/test/LogicalExpressionTest.cpp +++ b/src/common/expression/test/LogicalExpressionTest.cpp @@ -565,11 +565,3 @@ TEST_F(LogicalExpressionTest, LogicalCalculation) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/PathBuildExpressionTest.cpp b/src/common/expression/test/PathBuildExpressionTest.cpp index 40899676322..5f589c8863a 100644 --- a/src/common/expression/test/PathBuildExpressionTest.cpp +++ b/src/common/expression/test/PathBuildExpressionTest.cpp @@ -126,11 +126,3 @@ TEST_F(PathBuildExpressionTest, PathBuildToString) { } } } // namespace nebula - -int 
main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/PredicateExpressionTest.cpp b/src/common/expression/test/PredicateExpressionTest.cpp index aaa31c4fc6f..5c2f0069b9b 100644 --- a/src/common/expression/test/PredicateExpressionTest.cpp +++ b/src/common/expression/test/PredicateExpressionTest.cpp @@ -140,11 +140,3 @@ TEST_F(PredicateExpressionTest, PredicateExprToString) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/PropertyExpressionTest.cpp b/src/common/expression/test/PropertyExpressionTest.cpp index 32972691c8f..ec8ae54fad5 100644 --- a/src/common/expression/test/PropertyExpressionTest.cpp +++ b/src/common/expression/test/PropertyExpressionTest.cpp @@ -127,11 +127,3 @@ TEST_F(PropertyExpressionTest, PropertyToStringTest) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/ReduceExpressionTest.cpp b/src/common/expression/test/ReduceExpressionTest.cpp index 7a0105e5ccb..0c42b294efc 100644 --- a/src/common/expression/test/ReduceExpressionTest.cpp +++ b/src/common/expression/test/ReduceExpressionTest.cpp @@ -55,11 +55,3 @@ TEST_F(ReduceExpressionTest, ReduceExprToString) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/RelationalExpressionTest.cpp 
b/src/common/expression/test/RelationalExpressionTest.cpp index d194f61618a..571926754f4 100644 --- a/src/common/expression/test/RelationalExpressionTest.cpp +++ b/src/common/expression/test/RelationalExpressionTest.cpp @@ -1405,11 +1405,3 @@ TEST_F(RelationalExpressionTest, NotContainsToString) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/SubscriptExpressionTest.cpp b/src/common/expression/test/SubscriptExpressionTest.cpp index cdbe6cdd5d9..622557d7b5a 100644 --- a/src/common/expression/test/SubscriptExpressionTest.cpp +++ b/src/common/expression/test/SubscriptExpressionTest.cpp @@ -456,11 +456,3 @@ TEST_F(SubscriptExpressionTest, EdgeSubscript) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/TestBase.cpp b/src/common/expression/test/TestBase.cpp new file mode 100644 index 00000000000..266f305f24b --- /dev/null +++ b/src/common/expression/test/TestBase.cpp @@ -0,0 +1,32 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "common/expression/test/TestBase.h" + +namespace nebula { + +ExpressionContextMock gExpCtxt; +ObjectPool pool; + +std::unordered_map> args_ = { + {"null", {}}, + {"int", {4}}, + {"float", {1.1}}, + {"neg_int", {-1}}, + {"neg_float", {-1.1}}, + {"rand", {1, 10}}, + {"one", {-1.2}}, + {"two", {2, 4}}, + {"pow", {2, 3}}, + {"string", {"AbcDeFG"}}, + {"trim", {" abc "}}, + {"substr", {"abcdefghi", 2, 4}}, + {"side", {"abcdefghijklmnopq", 5}}, + {"neg_side", {"abcdefghijklmnopq", -2}}, + {"pad", {"abcdefghijkl", 16, "123"}}, + {"udf_is_in", {4, 1, 2, 8, 4, 3, 1, 0}}, +}; + +} // namespace nebula diff --git a/src/common/expression/test/TestBase.h b/src/common/expression/test/TestBase.h index 8217e0db337..7969f5a70ee 100644 --- a/src/common/expression/test/TestBase.h +++ b/src/common/expression/test/TestBase.h @@ -47,9 +47,12 @@ #include "common/expression/test/ExpressionContextMock.h" #include "parser/GQLParser.h" -nebula::ExpressionContextMock gExpCtxt; -nebula::ObjectPool pool; namespace nebula { + +extern ExpressionContextMock gExpCtxt; +extern ObjectPool pool; +extern std::unordered_map> args_; + class ExpressionTest : public ::testing::Test { public: void SetUp() override {} @@ -168,22 +171,6 @@ class ExpressionTest : public ::testing::Test { path.steps.emplace_back(std::move(step)); \ } while (0) -static std::unordered_map> args_ = { - {"null", {}}, - {"int", {4}}, - {"float", {1.1}}, - {"neg_int", {-1}}, - {"neg_float", {-1.1}}, - {"rand", {1, 10}}, - {"one", {-1.2}}, - {"two", {2, 4}}, - {"pow", {2, 3}}, - {"string", {"AbcDeFG"}}, - {"trim", {" abc "}}, - {"substr", {"abcdefghi", 2, 4}}, - {"side", {"abcdefghijklmnopq", 5}}, - {"neg_side", {"abcdefghijklmnopq", -2}}, - {"pad", {"abcdefghijkl", 16, "123"}}, - {"udf_is_in", {4, 1, 2, 8, 4, 3, 1, 0}}}; } // namespace nebula + #endif // COMMON_EXPRESSION_TEST_TESTBASE_H_ diff --git a/src/common/expression/test/TypeCastingExpressionTest.cpp b/src/common/expression/test/TypeCastingExpressionTest.cpp 
index 1e9696b9aa0..801b7d32621 100644 --- a/src/common/expression/test/TypeCastingExpressionTest.cpp +++ b/src/common/expression/test/TypeCastingExpressionTest.cpp @@ -158,11 +158,3 @@ TEST_F(TypeCastingExpressionTest, TypeCastTest) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/UnaryExpressionTest.cpp b/src/common/expression/test/UnaryExpressionTest.cpp index 0e26a2efa2e..8bce1c1fb52 100644 --- a/src/common/expression/test/UnaryExpressionTest.cpp +++ b/src/common/expression/test/UnaryExpressionTest.cpp @@ -177,11 +177,3 @@ TEST_F(UnaryExpressionTest, UnaryDECR) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/common/expression/test/VersionedVariableExpressionTest.cpp b/src/common/expression/test/VersionedVariableExpressionTest.cpp index 9f62ccf87ff..f6836c5386e 100644 --- a/src/common/expression/test/VersionedVariableExpressionTest.cpp +++ b/src/common/expression/test/VersionedVariableExpressionTest.cpp @@ -45,11 +45,3 @@ TEST_F(VersionedVariableExpressionTest, VersionedVar) { } } } // namespace nebula - -int main(int argc, char **argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} From be4d5bbfd9125673acb5d71a02e1ddc903b9ee5d Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Mon, 15 Nov 2021 17:30:25 +0800 Subject: [PATCH 16/53] Cancel memory check when the ratio greater than 1.0 (#3289) * Cancel memory check when the ratio greater than 1.0 * Comment Co-authored-by: Yichen Wang <18348405+Aiee@users.noreply.github.com> --- 
conf/nebula-graphd.conf.default | 2 +- conf/nebula-graphd.conf.production | 2 +- src/common/memory/MemoryUtils.cpp | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/conf/nebula-graphd.conf.default b/conf/nebula-graphd.conf.default index fa406f8008a..43792df94c3 100644 --- a/conf/nebula-graphd.conf.default +++ b/conf/nebula-graphd.conf.default @@ -79,7 +79,7 @@ --auth_type=password ########## memory ########## -# System memory high watermark ratio +# System memory high watermark ratio, cancel the memory checking when the ratio greater than 1.0 --system_memory_high_watermark_ratio=0.8 ########## experimental feature ########## diff --git a/conf/nebula-graphd.conf.production b/conf/nebula-graphd.conf.production index 26bc28827cf..09f26e5169e 100644 --- a/conf/nebula-graphd.conf.production +++ b/conf/nebula-graphd.conf.production @@ -77,7 +77,7 @@ --auth_type=password ########## memory ########## -# System memory high watermark ratio +# System memory high watermark ratio, cancel the memory checking when the ratio greater than 1.0 --system_memory_high_watermark_ratio=0.8 ########## experimental feature ########## diff --git a/src/common/memory/MemoryUtils.cpp b/src/common/memory/MemoryUtils.cpp index 442e7a87597..43477149eff 100644 --- a/src/common/memory/MemoryUtils.cpp +++ b/src/common/memory/MemoryUtils.cpp @@ -27,6 +27,9 @@ static const std::regex reTotalCache(R"(^total_(cache|inactive_file)\s+(\d+)$)") std::atomic_bool MemoryUtils::kHitMemoryHighWatermark{false}; StatusOr MemoryUtils::hitsHighWatermark() { + if (FLAGS_system_memory_high_watermark_ratio >= 1.0) { + return false; + } double available = 0.0, total = 0.0; if (FLAGS_containerized) { FileUtils::FileLineIterator iter("/sys/fs/cgroup/memory/memory.stat", &reTotalCache); From fc2977bd945596074e26ed5fe3a3b8bc8a69d112 Mon Sep 17 00:00:00 2001 From: Yichen Wang <18348405+Aiee@users.noreply.github.com> Date: Mon, 15 Nov 2021 23:11:35 +0800 Subject: [PATCH 17/53] Avoid conflict with nebula 
ent (#3307) Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- src/daemons/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/daemons/CMakeLists.txt b/src/daemons/CMakeLists.txt index ac98b67ff03..0fcbcbcd8d0 100644 --- a/src/daemons/CMakeLists.txt +++ b/src/daemons/CMakeLists.txt @@ -32,6 +32,7 @@ set(common_deps $ $ $ + $ ) set(storage_meta_deps @@ -130,7 +131,6 @@ nebula_add_executable( $ $ $ - $ ${common_deps} LIBRARIES ${PROXYGEN_LIBRARIES} From e32c06d9355c6b773001ad1732bd141cf733e587 Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Tue, 16 Nov 2021 10:37:49 +0800 Subject: [PATCH 18/53] Modify index bound value (#3285) * modify boundValue * add tck test * format tck test file * rm function * format feature file * fix feature error * format feature file * fix bug * fix bug --- src/common/utils/IndexKeyUtils.h | 2 +- src/graph/optimizer/OptimizerUtils.cpp | 659 +++--------------- src/graph/optimizer/OptimizerUtils.h | 29 +- src/graph/optimizer/rule/IndexScanRule.cpp | 77 +- src/graph/optimizer/rule/IndexScanRule.h | 1 - src/graph/optimizer/test/CMakeLists.txt | 14 - .../optimizer/test/IndexBoundValueTest.cpp | 315 --------- .../optimizer/test/IndexScanRuleTest.cpp | 169 ++--- src/graph/util/ToJson.cpp | 4 + src/storage/exec/IndexScanNode.cpp | 14 +- .../bugfix/TruncatedStringIndex.feature | 75 ++ .../features/optimizer/IndexScanRule.feature | 82 ++- 12 files changed, 357 insertions(+), 1084 deletions(-) delete mode 100644 src/graph/optimizer/test/IndexBoundValueTest.cpp create mode 100644 tests/tck/features/bugfix/TruncatedStringIndex.feature diff --git a/src/common/utils/IndexKeyUtils.h b/src/common/utils/IndexKeyUtils.h index 5bb6c5c07a8..1e5f6349509 100644 --- a/src/common/utils/IndexKeyUtils.h +++ b/src/common/utils/IndexKeyUtils.h @@ -143,7 +143,7 @@ class IndexKeyUtils final { return ""; } default: - LOG(ERROR) << "Unsupported default value type"; + 
LOG(FATAL) << "Unsupported default value type"; } return ""; } diff --git a/src/graph/optimizer/OptimizerUtils.cpp b/src/graph/optimizer/OptimizerUtils.cpp index 0537bd2fd1b..2d3f3afbe9e 100644 --- a/src/graph/optimizer/OptimizerUtils.cpp +++ b/src/graph/optimizer/OptimizerUtils.cpp @@ -26,531 +26,10 @@ using nebula::meta::cpp2::IndexItem; using nebula::storage::cpp2::IndexColumnHint; using nebula::storage::cpp2::IndexQueryContext; -using BVO = nebula::graph::OptimizerUtils::BoundValueOperator; using ExprKind = nebula::Expression::Kind; namespace nebula { namespace graph { - -Value OptimizerUtils::boundValue(const meta::cpp2::ColumnDef& col, - BoundValueOperator op, - const Value& v) { - switch (op) { - case BoundValueOperator::GREATER_THAN: { - return boundValueWithGT(col, v); - } - case BoundValueOperator::LESS_THAN: { - return boundValueWithLT(col, v); - } - case BoundValueOperator::MAX: { - return boundValueWithMax(col); - } - case BoundValueOperator::MIN: { - return boundValueWithMin(col); - } - } - return Value::kNullBadType; -} - -Value OptimizerUtils::boundValueWithGT(const meta::cpp2::ColumnDef& col, const Value& v) { - auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type()); - switch (type) { - case Value::Type::INT: { - if (v.getInt() == std::numeric_limits::max()) { - return v; - } else { - return v + 1; - } - } - case Value::Type::FLOAT: { - if (v.getFloat() > 0.0) { - if (v.getFloat() == std::numeric_limits::max()) { - return v; - } - } else if (v.getFloat() == 0.0) { - return Value(std::numeric_limits::min()); - } else { - if (v.getFloat() == -std::numeric_limits::min()) { - return Value(0.0); - } - } - return v.getFloat() + kEpsilon; - } - case Value::Type::STRING: { - if (!col.type.type_length_ref().has_value()) { - return Value::kNullBadType; - } - std::vector bytes(v.getStr().begin(), v.getStr().end()); - bytes.resize(*col.get_type().type_length_ref()); - for (size_t i = bytes.size();; i--) { - if (i > 0) { - if (bytes[i - 1]++ != 
255) break; - } else { - return Value(std::string(*col.get_type().type_length_ref(), '\377')); - } - } - return Value(std::string(bytes.begin(), bytes.end())); - } - case Value::Type::DATE: { - if (Date(std::numeric_limits::max(), 12, 31) == v.getDate()) { - return v.getDate(); - } else if (Date() == v.getDate()) { - return Date(0, 1, 2); - } - auto d = v.getDate(); - if (d.day < 31) { - d.day += 1; - } else { - d.day = 1; - if (d.month < 12) { - d.month += 1; - } else { - d.month = 1; - if (d.year < std::numeric_limits::max()) { - d.year += 1; - } else { - return v.getDate(); - } - } - } - return Value(d); - } - case Value::Type::TIME: { - auto t = v.getTime(); - // Ignore the time zone. - if (t.microsec < 999999) { - t.microsec = t.microsec + 1; - } else { - t.microsec = 0; - if (t.sec < 59) { - t.sec += 1; - } else { - t.sec = 0; - if (t.minute < 59) { - t.minute += 1; - } else { - t.minute = 0; - if (t.hour < 23) { - t.hour += 1; - } else { - return v.getTime(); - } - } - } - } - return Value(t); - } - case Value::Type::DATETIME: { - auto dt = v.getDateTime(); - // Ignore the time zone. 
- if (dt.microsec < 999999) { - dt.microsec = dt.microsec + 1; - } else { - dt.microsec = 0; - if (dt.sec < 59) { - dt.sec += 1; - } else { - dt.sec = 0; - if (dt.minute < 59) { - dt.minute += 1; - } else { - dt.minute = 0; - if (dt.hour < 23) { - dt.hour += 1; - } else { - dt.hour = 0; - if (dt.day < 31) { - dt.day += 1; - } else { - dt.day = 1; - if (dt.month < 12) { - dt.month += 1; - } else { - dt.month = 1; - if (dt.year < std::numeric_limits::max()) { - dt.year += 1; - } else { - return v.getDateTime(); - } - } - } - } - } - } - } - return Value(dt); - } - case Value::Type::__EMPTY__: - case Value::Type::BOOL: - case Value::Type::NULLVALUE: - case Value::Type::VERTEX: - case Value::Type::EDGE: - case Value::Type::LIST: - case Value::Type::SET: - case Value::Type::MAP: - case Value::Type::DATASET: - case Value::Type::GEOGRAPHY: // TODO(jie) - case Value::Type::PATH: { - DLOG(FATAL) << "Not supported value type " << type << "for index."; - return Value::kNullBadType; - } - } - DLOG(FATAL) << "Unknown value type " << static_cast(type); - return Value::kNullBadType; -} - -Value OptimizerUtils::boundValueWithLT(const meta::cpp2::ColumnDef& col, const Value& v) { - auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type()); - switch (type) { - case Value::Type::INT: { - if (v.getInt() == std::numeric_limits::min()) { - return v; - } else { - return v - 1; - } - } - case Value::Type::FLOAT: { - if (v.getFloat() < 0.0) { - if (v.getFloat() == -std::numeric_limits::max()) { - return v; - } else if (v.getFloat() == -std::numeric_limits::min()) { - return Value(0.0); - } - } else if (v.getFloat() == 0.0) { - return Value(-std::numeric_limits::min()); - } - return v.getFloat() - kEpsilon; - } - case Value::Type::STRING: { - if (!col.type.type_length_ref().has_value()) { - return Value::kNullBadType; - } - std::vector bytes(v.getStr().begin(), v.getStr().end()); - bytes.resize(*col.get_type().type_length_ref()); - for (size_t i = bytes.size();; i--) { - if (i 
> 0) { - if (bytes[i - 1]-- != 0) break; - } else { - return Value(std::string(*col.get_type().type_length_ref(), '\0')); - } - } - return Value(std::string(bytes.begin(), bytes.end())); - } - case Value::Type::DATE: { - if (Date() == v.getDate()) { - return v.getDate(); - } - auto d = v.getDate(); - if (d.day > 1) { - d.day -= 1; - } else { - d.day = 31; - if (d.month > 1) { - d.month -= 1; - } else { - d.month = 12; - if (d.year > 1) { - d.year -= 1; - } else { - return v.getDate(); - } - } - } - return Value(d); - } - case Value::Type::TIME: { - if (Time() == v.getTime()) { - return v.getTime(); - } - auto t = v.getTime(); - if (t.microsec >= 1) { - t.microsec -= 1; - } else { - t.microsec = 999999; - if (t.sec >= 1) { - t.sec -= 1; - } else { - t.sec = 59; - if (t.minute >= 1) { - t.minute -= 1; - } else { - t.minute = 59; - if (t.hour >= 1) { - t.hour -= 1; - } else { - return v.getTime(); - } - } - } - } - return Value(t); - } - case Value::Type::DATETIME: { - if (DateTime() == v.getDateTime()) { - return v.getDateTime(); - } - auto dt = v.getDateTime(); - if (dt.microsec >= 1) { - dt.microsec -= 1; - } else { - dt.microsec = 999999; - if (dt.sec >= 1) { - dt.sec -= 1; - } else { - dt.sec = 59; - if (dt.minute >= 1) { - dt.minute -= 1; - } else { - dt.minute = 59; - if (dt.hour >= 1) { - dt.hour -= 1; - } else { - dt.hour = 23; - if (dt.day > 1) { - dt.day -= 1; - } else { - dt.day = 31; - if (dt.month > 1) { - dt.month -= 1; - } else { - dt.month = 12; - if (dt.year > 1) { - dt.year -= 1; - } else { - return v.getDateTime(); - } - } - } - } - } - } - } - return Value(dt); - } - case Value::Type::__EMPTY__: - case Value::Type::BOOL: - case Value::Type::NULLVALUE: - case Value::Type::VERTEX: - case Value::Type::EDGE: - case Value::Type::LIST: - case Value::Type::SET: - case Value::Type::MAP: - case Value::Type::DATASET: - case Value::Type::GEOGRAPHY: // TODO(jie) - case Value::Type::PATH: { - DLOG(FATAL) << "Not supported value type " << type << "for index."; 
- return Value::kNullBadType; - } - } - DLOG(FATAL) << "Unknown value type " << static_cast(type); - return Value::kNullBadType; -} - -Value OptimizerUtils::boundValueWithMax(const meta::cpp2::ColumnDef& col) { - auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type()); - switch (type) { - case Value::Type::INT: { - return Value(std::numeric_limits::max()); - } - case Value::Type::FLOAT: { - return Value(std::numeric_limits::max()); - } - case Value::Type::STRING: { - if (!col.type.type_length_ref().has_value()) { - return Value::kNullBadType; - } - return Value(std::string(*col.get_type().type_length_ref(), '\377')); - } - case Value::Type::DATE: { - Date d; - d.year = std::numeric_limits::max(); - d.month = 12; - d.day = 31; - return Value(d); - } - case Value::Type::TIME: { - Time dt; - dt.hour = 23; - dt.minute = 59; - dt.sec = 59; - dt.microsec = 999999; - return Value(dt); - } - case Value::Type::DATETIME: { - DateTime dt; - dt.year = std::numeric_limits::max(); - dt.month = 12; - dt.day = 31; - dt.hour = 23; - dt.minute = 59; - dt.sec = 59; - dt.microsec = 999999; - return Value(dt); - } - case Value::Type::__EMPTY__: - case Value::Type::BOOL: - case Value::Type::NULLVALUE: - case Value::Type::VERTEX: - case Value::Type::EDGE: - case Value::Type::LIST: - case Value::Type::SET: - case Value::Type::MAP: - case Value::Type::DATASET: - case Value::Type::GEOGRAPHY: // TODO(jie) - case Value::Type::PATH: { - DLOG(FATAL) << "Not supported value type " << type << "for index."; - return Value::kNullBadType; - } - } - DLOG(FATAL) << "Unknown value type " << static_cast(type); - return Value::kNullBadType; -} - -Value OptimizerUtils::boundValueWithMin(const meta::cpp2::ColumnDef& col) { - auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type()); - switch (type) { - case Value::Type::INT: { - return Value(std::numeric_limits::min()); - } - case Value::Type::FLOAT: { - return Value(-std::numeric_limits::max()); - } - case Value::Type::STRING: 
{ - if (!col.type.type_length_ref().has_value()) { - return Value::kNullBadType; - } - return Value(std::string(*col.get_type().type_length_ref(), '\0')); - } - case Value::Type::DATE: { - return Value(Date()); - } - case Value::Type::TIME: { - return Value(Time()); - } - case Value::Type::DATETIME: { - return Value(DateTime()); - } - case Value::Type::__EMPTY__: - case Value::Type::BOOL: - case Value::Type::NULLVALUE: - case Value::Type::VERTEX: - case Value::Type::EDGE: - case Value::Type::LIST: - case Value::Type::SET: - case Value::Type::MAP: - case Value::Type::DATASET: - case Value::Type::GEOGRAPHY: // TODO(jie) - case Value::Type::PATH: { - DLOG(FATAL) << "Not supported value type " << type << "for index."; - return Value::kNullBadType; - } - } - DLOG(FATAL) << "Unknown value type " << static_cast(type); - return Value::kNullBadType; -} - -Value OptimizerUtils::normalizeValue(const meta::cpp2::ColumnDef& col, const Value& v) { - auto type = SchemaUtil::propTypeToValueType(col.get_type().get_type()); - switch (type) { - case Value::Type::INT: - case Value::Type::FLOAT: - case Value::Type::BOOL: - case Value::Type::DATE: - case Value::Type::TIME: - case Value::Type::DATETIME: { - return v; - } - case Value::Type::STRING: { - if (!col.type.type_length_ref().has_value()) { - return Value::kNullBadType; - } - return v; - } - case Value::Type::__EMPTY__: - case Value::Type::NULLVALUE: - case Value::Type::VERTEX: - case Value::Type::EDGE: - case Value::Type::LIST: - case Value::Type::SET: - case Value::Type::MAP: - case Value::Type::DATASET: - case Value::Type::GEOGRAPHY: // TODO(jie) - case Value::Type::PATH: { - DLOG(FATAL) << "Not supported value type " << type << "for index."; - return Value::kNullBadType; - } - } - DLOG(FATAL) << "Unknown value type " << static_cast(type); - return Value::kNullBadType; -} - -Status OptimizerUtils::boundValue(Expression::Kind kind, - const Value& val, - const meta::cpp2::ColumnDef& col, - Value& begin, - Value& end) { - if 
(val.type() != graph::SchemaUtil::propTypeToValueType(col.type.type)) { - return Status::SemanticError("Data type error of field : %s", col.get_name().c_str()); - } - switch (kind) { - case Expression::Kind::kRelLE: { - // if c1 <= int(5) , the range pair should be (min, 6) - // if c1 < int(5), the range pair should be (min, 5) - auto v = OptimizerUtils::boundValue(col, BoundValueOperator::GREATER_THAN, val); - if (v == Value::kNullBadType) { - LOG(ERROR) << "Get bound value error. field : " << col.get_name(); - return Status::Error("Get bound value error. field : %s", col.get_name().c_str()); - } - // where c <= 1 and c <= 2 , 1 should be valid. - if (end.empty()) { - end = v; - } else { - end = v < end ? v : end; - } - break; - } - case Expression::Kind::kRelGE: { - // where c >= 1 and c >= 2 , 2 should be valid. - if (begin.empty()) { - begin = val; - } else { - begin = val < begin ? begin : val; - } - break; - } - case Expression::Kind::kRelLT: { - // c < 5 and c < 6 , 5 should be valid. - if (end.empty()) { - end = val; - } else { - end = val < end ? val : end; - } - break; - } - case Expression::Kind::kRelGT: { - // if c >= 5, the range pair should be (5, max) - // if c > 5, the range pair should be (6, max) - auto v = OptimizerUtils::boundValue(col, BoundValueOperator::GREATER_THAN, val); - if (v == Value::kNullBadType) { - LOG(ERROR) << "Get bound value error. field : " << col.get_name(); - return Status::Error("Get bound value error. field : %s", col.get_name().c_str()); - } - // where c > 1 and c > 2 , 2 should be valid. - if (begin.empty()) { - begin = v; - } else { - begin = v < begin ? begin : v; - } - break; - } - default: { - // TODO(yee): Semantic error - return Status::Error("Invalid expression kind."); - } - } - return Status::OK(); -} - namespace { // IndexScore is used to find the optimal index. 
The larger the score, the @@ -589,16 +68,6 @@ struct IndexResult { } }; -Status checkValue(const ColumnDef& field, BVO bvo, Value* value) { - if (value->empty()) { - *value = OptimizerUtils::boundValue(field, bvo, Value()); - if (value->isBadNull()) { - return Status::Error("Get bound value error. field : %s", field.get_name().c_str()); - } - } - return Status::OK(); -} - Status handleRangeIndex(const meta::cpp2::ColumnDef& field, const Expression* expr, const Value& value, @@ -606,12 +75,25 @@ Status handleRangeIndex(const meta::cpp2::ColumnDef& field, if (field.get_type().get_type() == nebula::cpp2::PropertyType::BOOL) { return Status::Error("Range scan for bool type is illegal"); } - Value begin, end; - NG_RETURN_IF_ERROR(OptimizerUtils::boundValue(expr->kind(), value, field, begin, end)); - NG_RETURN_IF_ERROR(checkValue(field, BVO::MIN, &begin)); - NG_RETURN_IF_ERROR(checkValue(field, BVO::MAX, &end)); - hint->set_begin_value(std::move(begin)); - hint->set_end_value(std::move(end)); + bool include = false; + switch (expr->kind()) { + case Expression::Kind::kRelGE: + include = true; + [[fallthrough]]; + case Expression::Kind::kRelGT: + hint->set_begin_value(value); + hint->set_include_begin(include); + break; + case Expression::Kind::kRelLE: + include = true; + [[fallthrough]]; + case Expression::Kind::kRelLT: + hint->set_end_value(value); + hint->set_include_end(include); + break; + default: + break; + } hint->set_scan_type(storage::cpp2::ScanType::RANGE); hint->set_column_name(field.get_name()); return Status::OK(); @@ -620,7 +102,7 @@ Status handleRangeIndex(const meta::cpp2::ColumnDef& field, void handleEqualIndex(const ColumnDef& field, const Value& value, IndexColumnHint* hint) { hint->set_scan_type(storage::cpp2::ScanType::PREFIX); hint->set_column_name(field.get_name()); - hint->set_begin_value(OptimizerUtils::normalizeValue(field, value)); + hint->set_begin_value(value); } StatusOr selectRelExprIndex(const ColumnDef& field, @@ -682,40 +164,42 @@ 
StatusOr selectRelExprIndex(const RelationalExpression* expr, const return result; } -bool mergeRangeColumnHints(const ColumnDef& field, - const std::vector& hints, - Value* begin, - Value* end) { +bool mergeRangeColumnHints(const std::vector& hints, + std::pair* begin, + std::pair* end) { for (auto& h : hints) { switch (h.score) { case IndexScore::kRange: { if (h.hint.begin_value_ref().is_set()) { - const auto& value = h.hint.get_begin_value(); - if (begin->empty() || *begin < value) { - *begin = value; + auto tmp = std::make_pair(h.hint.get_begin_value(), h.hint.get_include_begin()); + if (begin->first.empty()) { + *begin = std::move(tmp); + } else { + OptimizerUtils::compareAndSwapBound(tmp, *begin); } } if (h.hint.end_value_ref().is_set()) { - const auto& value = h.hint.get_end_value(); - if (end->empty() || *end > value) { - *end = value; + auto tmp = std::make_pair(h.hint.get_end_value(), h.hint.get_include_end()); + if (end->first.empty()) { + *end = std::move(tmp); + } else { + OptimizerUtils::compareAndSwapBound(*end, tmp); } } break; } case IndexScore::kPrefix: { - // Prefix value <=> range [value, value] - const auto& value = h.hint.get_begin_value(); - Value b, e; - auto status = OptimizerUtils::boundValue(ExprKind::kRelGE, value, field, b, e); - if (!status.ok()) return false; - if (begin->empty() || *begin < b) { - *begin = b; + auto tmp = std::make_pair(h.hint.get_begin_value(), true); + if (begin->first.empty()) { + *begin = tmp; + } else { + OptimizerUtils::compareAndSwapBound(tmp, *begin); } - status = OptimizerUtils::boundValue(ExprKind::kRelLE, value, field, b, e); - if (!status.ok()) return false; - if (end->empty() || *end > e) { - *end = e; + tmp = std::make_pair(h.hint.get_begin_value(), true); + if (end->first.empty()) { + *end = std::move(tmp); + } else { + OptimizerUtils::compareAndSwapBound(*end, tmp); } break; } @@ -724,7 +208,15 @@ bool mergeRangeColumnHints(const ColumnDef& field, } } } - return !(*begin >= *end); + bool ret = true; + 
if (begin->first > end->first) { + ret = false; + } else if (begin->first == end->first) { + if (!(begin->second && end->second)) { + ret = false; + } + } + return ret; } bool getIndexColumnHintInExpr(const ColumnDef& field, @@ -748,34 +240,33 @@ bool getIndexColumnHintInExpr(const ColumnDef& field, *hint = hints.front(); return true; } - Value begin, end; - if (!mergeRangeColumnHints(field, hints, &begin, &end)) { + std::pair begin, end; + if (!mergeRangeColumnHints(hints, &begin, &end)) { return false; } ScoredColumnHint h; h.hint.set_column_name(field.get_name()); - // Change scan type to prefix if begin + 1 == end - Value newBegin, newEnd; - auto status = OptimizerUtils::boundValue(ExprKind::kRelGT, begin, field, newBegin, newEnd); - if (!status.ok()) { - // TODO(yee): differentiate between empty set and invalid index to use - return false; - } - if (newBegin < end) { - // end > newBegin > begin + if (begin.first < end.first) { h.hint.set_scan_type(storage::cpp2::ScanType::RANGE); - h.hint.set_begin_value(std::move(begin)); - h.hint.set_end_value(std::move(end)); + h.hint.set_begin_value(std::move(begin.first)); + h.hint.set_end_value(std::move(end.first)); + h.hint.set_include_begin(begin.second); + h.hint.set_include_end(end.second); h.score = IndexScore::kRange; - } else if (newBegin == end) { - // end == neBegin == begin + 1 - h.hint.set_scan_type(storage::cpp2::ScanType::PREFIX); - h.hint.set_begin_value(std::move(begin)); - h.score = IndexScore::kPrefix; + } else if (begin.first == end.first) { + if (begin.second == false && end.second == false) { + return false; + } else { + h.hint.set_scan_type(storage::cpp2::ScanType::RANGE); + h.hint.set_begin_value(std::move(begin.first)); + h.hint.set_end_value(std::move(end.first)); + h.hint.set_include_begin(begin.second); + h.hint.set_include_end(end.second); + h.score = IndexScore::kRange; + } } else { return false; } - *hint = std::move(h); return true; } @@ -949,5 +440,15 @@ void 
OptimizerUtils::copyIndexScanData(const nebula::graph::IndexScan* from, to->setFilter(from->filter() == nullptr ? nullptr : from->filter()->clone()); } +Status OptimizerUtils::compareAndSwapBound(std::pair& a, std::pair& b) { + if (a.first > b.first) { + std::swap(a, b); + } else if (a.first < b.first) { // do nothing + } else if (a.second > b.second) { + std::swap(a, b); + } + return Status::OK(); +} + } // namespace graph } // namespace nebula diff --git a/src/graph/optimizer/OptimizerUtils.h b/src/graph/optimizer/OptimizerUtils.h index 42461df8ff8..d628c83020f 100644 --- a/src/graph/optimizer/OptimizerUtils.h +++ b/src/graph/optimizer/OptimizerUtils.h @@ -30,35 +30,10 @@ class IndexScan; class OptimizerUtils { public: - enum class BoundValueOperator { - GREATER_THAN = 0, - LESS_THAN, - MAX, - MIN, - }; - OptimizerUtils() = delete; - static Value boundValue(const meta::cpp2::ColumnDef& col, - BoundValueOperator op, - const Value& v = Value()); - - static Value boundValueWithGT(const meta::cpp2::ColumnDef& col, const Value& v); - - static Value boundValueWithLT(const meta::cpp2::ColumnDef& col, const Value& v); - - static Value boundValueWithMax(const meta::cpp2::ColumnDef& col); - - static Value boundValueWithMin(const meta::cpp2::ColumnDef& col); - - static Value normalizeValue(const meta::cpp2::ColumnDef& col, const Value& v); - - static Status boundValue(Expression::Kind kind, - const Value& val, - const meta::cpp2::ColumnDef& col, - Value& begin, - Value& end); - + // Compare `a` and `b`, if `a`>`b` then swap a and b.That means `b`>=`a` after call this function. 
+ static Status compareAndSwapBound(std::pair& a, std::pair& b); static void eraseInvalidIndexItems( int32_t schemaId, std::vector>* indexItems); diff --git a/src/graph/optimizer/rule/IndexScanRule.cpp b/src/graph/optimizer/rule/IndexScanRule.cpp index 53b8d029727..399a4987ee1 100644 --- a/src/graph/optimizer/rule/IndexScanRule.cpp +++ b/src/graph/optimizer/rule/IndexScanRule.cpp @@ -199,13 +199,38 @@ Status IndexScanRule::appendIQCtx(const IndexItem& index, IndexQueryCtx& iqctx) } \ } while (0) +inline bool verifyType(const Value& val) { + switch (val.type()) { + case Value::Type::__EMPTY__: + case Value::Type::NULLVALUE: + case Value::Type::VERTEX: + case Value::Type::EDGE: + case Value::Type::LIST: + case Value::Type::SET: + case Value::Type::MAP: + case Value::Type::DATASET: + case Value::Type::GEOGRAPHY: // TODO(jie) + case Value::Type::PATH: { + DLOG(FATAL) << "Not supported value type " << val.type() << "for index."; + return false; + } break; + default: { + return true; + } + } +} Status IndexScanRule::appendColHint(std::vector& hints, const FilterItems& items, const meta::cpp2::ColumnDef& col) const { + // CHECK(false); IndexColumnHint hint; - Value begin, end; + std::pair begin, end; bool isRangeScan = true; for (const auto& item : items.items) { + if (!verifyType(item.value_)) { + return Status::SemanticError( + fmt::format("Not supported value type {} for index.", item.value_.type())); + } if (item.relOP_ == Expression::Kind::kRelEQ) { // check the items, don't allow where c1 == 1 and c1 == 2 and c1 > 3.... 
// If EQ item appears, only one element is allowed @@ -213,7 +238,7 @@ Status IndexScanRule::appendColHint(std::vector& hints, return Status::SemanticError(); } isRangeScan = false; - begin = OptimizerUtils::normalizeValue(col, item.value_); + begin = {item.value_, true}; break; } // because only type for bool is true/false, which can not satisify [start, @@ -221,24 +246,54 @@ Status IndexScanRule::appendColHint(std::vector& hints, if (col.get_type().get_type() == nebula::cpp2::PropertyType::BOOL) { return Status::SemanticError("Range scan for bool type is illegal"); } - NG_RETURN_IF_ERROR(OptimizerUtils::boundValue(item.relOP_, item.value_, col, begin, end)); + if (item.value_.type() != graph::SchemaUtil::propTypeToValueType(col.type.type)) { + return Status::SemanticError("Data type error of field : %s", col.get_name().c_str()); + } + bool include = false; + switch (item.relOP_) { + case Expression::Kind::kRelLE: + include = true; + [[fallthrough]]; + case Expression::Kind::kRelLT: { + if (end.first.empty()) { + end.first = item.value_; + end.second = include; + } else { + auto tmp = std::make_pair(item.value_, include); + OptimizerUtils::compareAndSwapBound(end, tmp); + } + } break; + case Expression::Kind::kRelGE: + include = true; + [[fallthrough]]; + case Expression::Kind::kRelGT: { + if (begin.first.empty()) { + begin.first = item.value_; + begin.second = include; + } else { + auto tmp = std::make_pair(item.value_, include); + OptimizerUtils::compareAndSwapBound(tmp, begin); + } + } break; + default: + return Status::Error("Invalid expression kind."); + } } if (isRangeScan) { - if (begin.empty()) { - begin = OptimizerUtils::boundValue(col, BVO::MIN, Value()); - CHECK_BOUND_VALUE(begin, col.get_name()); + if (!begin.first.empty()) { + hint.set_begin_value(begin.first); + hint.set_include_begin(begin.second); } - if (end.empty()) { - end = OptimizerUtils::boundValue(col, BVO::MAX, Value()); - CHECK_BOUND_VALUE(end, col.get_name()); + if (!end.first.empty()) { 
+ hint.set_end_value(end.first); + hint.set_include_end(end.second); } hint.set_scan_type(storage::cpp2::ScanType::RANGE); - hint.set_end_value(std::move(end)); } else { + hint.set_begin_value(begin.first); hint.set_scan_type(storage::cpp2::ScanType::PREFIX); } - hint.set_begin_value(std::move(begin)); hint.set_column_name(col.get_name()); hints.emplace_back(std::move(hint)); return Status::OK(); diff --git a/src/graph/optimizer/rule/IndexScanRule.h b/src/graph/optimizer/rule/IndexScanRule.h index f906a7049e4..93022907972 100644 --- a/src/graph/optimizer/rule/IndexScanRule.h +++ b/src/graph/optimizer/rule/IndexScanRule.h @@ -15,7 +15,6 @@ namespace opt { using graph::QueryContext; using storage::cpp2::IndexColumnHint; using storage::cpp2::IndexQueryContext; -using BVO = graph::OptimizerUtils::BoundValueOperator; using IndexItem = std::shared_ptr; class OptContext; diff --git a/src/graph/optimizer/test/CMakeLists.txt b/src/graph/optimizer/test/CMakeLists.txt index 29c478e66c8..eaec4363bf6 100644 --- a/src/graph/optimizer/test/CMakeLists.txt +++ b/src/graph/optimizer/test/CMakeLists.txt @@ -48,20 +48,6 @@ set(OPTIMIZER_TEST_LIB $ ) -nebula_add_test( - NAME - index_bound_value_test - SOURCES - IndexBoundValueTest.cpp - OBJECTS - ${OPTIMIZER_TEST_LIB} - LIBRARIES - ${PROXYGEN_LIBRARIES} - ${THRIFT_LIBRARIES} - gtest - gtest_main -) - nebula_add_test( NAME index_scan_rule_test diff --git a/src/graph/optimizer/test/IndexBoundValueTest.cpp b/src/graph/optimizer/test/IndexBoundValueTest.cpp deleted file mode 100644 index 7a9b2d7896b..00000000000 --- a/src/graph/optimizer/test/IndexBoundValueTest.cpp +++ /dev/null @@ -1,315 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include - -#include "graph/optimizer/OptimizerUtils.h" - -namespace nebula { -namespace graph { - -using OP = OptimizerUtils::BoundValueOperator; -using nebula::cpp2::PropertyType; - -TEST(IndexBoundValueTest, StringTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::FIXED_STRING); - typeDef.set_type_length(8); - col.set_type(std::move(typeDef)); - } - std::vector max = {255, 255, 255, 255, 255, 255, 255, 255}; - std::vector min = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'}; - auto maxStr = std::string(max.begin(), max.end()); - auto minStr = std::string(min.begin(), min.end()); - EXPECT_EQ(maxStr, OptimizerUtils::boundValue(col, OP::MAX, Value("aa")).getStr()); - EXPECT_EQ(maxStr, OptimizerUtils::boundValue(col, OP::MAX, Value(maxStr)).getStr()); - EXPECT_EQ(minStr, OptimizerUtils::boundValue(col, OP::MIN, Value("aa")).getStr()); - EXPECT_EQ(minStr, OptimizerUtils::boundValue(col, OP::MIN, Value("")).getStr()); - - std::string retVal = OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value("aa")).getStr(); - std::vector expected = {'a', 'a', '\0', '\0', '\0', '\0', '\0', '\001'}; - EXPECT_EQ(std::string(expected.begin(), expected.end()), retVal); - - EXPECT_EQ(maxStr, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(maxStr)).getStr()); - - retVal = OptimizerUtils::boundValue(col, OP::LESS_THAN, Value("aa")).getStr(); - expected = {'a', '`', 255, 255, 255, 255, 255, 255}; - EXPECT_EQ(std::string(expected.begin(), expected.end()), retVal); - - retVal = OptimizerUtils::boundValue(col, OP::LESS_THAN, Value("")).getStr(); - EXPECT_EQ(minStr, retVal); - - retVal = OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(minStr)).getStr(); - EXPECT_EQ(minStr, retVal); - - { - auto actual = "ABCDEFGHIJKLMN"; - auto expectGT = "ABCDEFGI"; - auto expectLT = "ABCDEFGG"; - EXPECT_EQ(expectGT, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actual)).getStr()); - EXPECT_EQ(expectLT, 
OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(actual)).getStr()); - } - { - std::vector act = {255, 255, 255, 254, 255, 255, 255, 255}; - std::vector exp = {255, 255, 255, 255, 0, 0, 0, 0}; - auto actStr = std::string(act.begin(), act.end()); - auto expStr = std::string(exp.begin(), exp.end()); - EXPECT_EQ(expStr, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actStr)).getStr()); - } - { - std::vector act = {255, 255, 255, 0, 0, 0, 0, 0}; - std::vector exp = {255, 255, 254, 255, 255, 255, 255, 255}; - auto actStr = std::string(act.begin(), act.end()); - auto expStr = std::string(exp.begin(), exp.end()); - EXPECT_EQ(expStr, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(actStr)).getStr()); - } -} - -TEST(IndexBoundValueTest, IntTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::INT64); - col.set_type(std::move(typeDef)); - } - auto maxInt = std::numeric_limits::max(); - auto minInt = std::numeric_limits::min(); - EXPECT_EQ(maxInt, OptimizerUtils::boundValue(col, OP::MAX, Value(maxInt)).getInt()); - EXPECT_EQ(maxInt, OptimizerUtils::boundValue(col, OP::MAX, Value(1L)).getInt()); - EXPECT_EQ(minInt, OptimizerUtils::boundValue(col, OP::MIN, Value(minInt)).getInt()); - EXPECT_EQ(minInt, OptimizerUtils::boundValue(col, OP::MIN, Value(1L)).getInt()); - - EXPECT_EQ(minInt, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(minInt)).getInt()); - EXPECT_EQ(maxInt, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(maxInt)).getInt()); - EXPECT_EQ(minInt + 1, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(minInt)).getInt()); - EXPECT_EQ(maxInt - 1, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(maxInt)).getInt()); - - EXPECT_EQ(1, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(0L)).getInt()); - EXPECT_EQ(-1, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(0L)).getInt()); - - EXPECT_EQ(6, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(5L)).getInt()); - 
EXPECT_EQ(4, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(5L)).getInt()); -} - -TEST(IndexBoundValueTest, DoubleTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::DOUBLE); - col.set_type(std::move(typeDef)); - } - auto maxDouble = std::numeric_limits::max(); - auto minDouble = std::numeric_limits::min(); - EXPECT_EQ(maxDouble, OptimizerUtils::boundValue(col, OP::MAX, Value(maxDouble)).getFloat()); - EXPECT_EQ(maxDouble, OptimizerUtils::boundValue(col, OP::MAX, Value(1.1)).getFloat()); - EXPECT_EQ(-maxDouble, OptimizerUtils::boundValue(col, OP::MIN, Value(minDouble)).getFloat()); - EXPECT_EQ(-maxDouble, OptimizerUtils::boundValue(col, OP::MIN, Value(1.1)).getFloat()); - - EXPECT_EQ(0.0, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(-minDouble)).getFloat()); - EXPECT_EQ(maxDouble - kEpsilon, - OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(maxDouble)).getFloat()); - EXPECT_EQ(-minDouble, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(0.0)).getFloat()); - - EXPECT_EQ(5.1 - kEpsilon, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(5.1))); - - EXPECT_EQ(-(5.1 + kEpsilon), OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(-5.1))); - - EXPECT_EQ(maxDouble, - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(maxDouble)).getFloat()); - EXPECT_EQ(0.0, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(-minDouble)).getFloat()); - - EXPECT_EQ(minDouble, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(0.0)).getFloat()); - - EXPECT_EQ(5.1 + kEpsilon, - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(5.1)).getFloat()); - - EXPECT_EQ(-(5.1 - kEpsilon), - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(-5.1)).getFloat()); -} - -TEST(IndexBoundValueTest, DateTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::DATE); - col.set_type(std::move(typeDef)); - } - auto maxYear = 
std::numeric_limits::max(); - EXPECT_EQ(Date(maxYear, 12, 31), OptimizerUtils::boundValue(col, OP::MAX, Value(Date()))); - EXPECT_EQ(Date(), OptimizerUtils::boundValue(col, OP::MIN, Value(Date(maxYear, 12, 31)))); - EXPECT_EQ(Date(2021, 1, 1), - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(Date(2020, 12, 31)))); - EXPECT_EQ(Date(maxYear, 12, 31), - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(Date(maxYear, 12, 31)))); - EXPECT_EQ(Date(2020, 1, 2), - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(Date(2020, 1, 1)))); - EXPECT_EQ(Date(0, 1, 2), OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(Date()))); - EXPECT_EQ(Date(), OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(Date()))); - EXPECT_EQ(Date(2019, 12, 30), - OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(Date(2019, 12, 31)))); - EXPECT_EQ(Date(2018, 12, 31), - OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(Date(2019, 1, 1)))); -} - -TEST(IndexBoundValueTest, TimeTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::TIME); - col.set_type(std::move(typeDef)); - } - Time maxT{23, 59, 59, 999999}; - - Time minT = Time(); - - EXPECT_EQ(maxT, OptimizerUtils::boundValue(col, OP::MAX, Value(maxT)).getTime()); - EXPECT_EQ(minT, OptimizerUtils::boundValue(col, OP::MIN, Value(maxT)).getTime()); - EXPECT_EQ(maxT, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(maxT)).getTime()); - - { - Time actual, expect; - actual.microsec = 999999; - actual.sec = 59; - actual.minute = 59; - actual.hour = 22; - - expect.microsec = 0; - expect.sec = 0; - expect.minute = 0; - expect.hour = 23; - EXPECT_EQ(expect, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actual)).getTime()); - } - { - Time actual, expect; - actual.microsec = 999999; - actual.sec = 34; - actual.minute = 59; - actual.hour = 23; - - expect.microsec = 0; - expect.sec = 35; - expect.minute = 59; - expect.hour = 23; - EXPECT_EQ(expect, 
OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actual)).getTime()); - } - { - Time expect = Time(); - EXPECT_EQ(expect, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(expect))); - } - { - Time actual, expect; - actual.microsec = 999999; - actual.sec = 34; - actual.minute = 59; - actual.hour = 23; - - expect.microsec = 999998; - expect.sec = 34; - expect.minute = 59; - expect.hour = 23; - EXPECT_EQ(expect, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(actual)).getTime()); - } -} - -TEST(IndexBoundValueTest, DateTimeTest) { - meta::cpp2::ColumnDef col; - { - meta::cpp2::ColumnTypeDef typeDef; - typeDef.set_type(PropertyType::DATETIME); - col.set_type(std::move(typeDef)); - } - DateTime maxDT; - { - maxDT.microsec = 999999; - maxDT.sec = 59; - maxDT.minute = 59; - maxDT.hour = 23; - maxDT.day = 31; - maxDT.month = 12; - maxDT.year = std::numeric_limits::max(); - } - - DateTime minDT = DateTime(); - - EXPECT_EQ(maxDT, OptimizerUtils::boundValue(col, OP::MAX, Value(maxDT)).getDateTime()); - EXPECT_EQ(minDT, OptimizerUtils::boundValue(col, OP::MIN, Value(maxDT)).getDateTime()); - EXPECT_EQ(maxDT, OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(maxDT)).getDateTime()); - - { - DateTime actual, expect; - actual.microsec = 999999; - actual.sec = 59; - actual.minute = 59; - actual.hour = 23; - actual.day = 31; - actual.month = 12; - actual.year = 2020; - - expect.microsec = 0; - expect.sec = 0; - expect.minute = 0; - expect.hour = 0; - expect.day = 1; - expect.month = 1; - expect.year = 2021; - EXPECT_EQ(expect, - OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actual)).getDateTime()); - } - { - DateTime actual, expect; - actual.microsec = 999999; - actual.sec = 34; - actual.minute = 59; - actual.hour = 23; - actual.day = 31; - actual.month = 12; - actual.year = 2020; - - expect.microsec = 0; - expect.sec = 35; - expect.minute = 59; - expect.hour = 23; - expect.day = 31; - expect.month = 12; - expect.year = 2020; - EXPECT_EQ(expect, - 
OptimizerUtils::boundValue(col, OP::GREATER_THAN, Value(actual)).getDateTime()); - } - { - DateTime expect = DateTime(); - EXPECT_EQ(expect, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(expect))); - } - { - DateTime actual, expect; - actual.microsec = 999999; - actual.sec = 34; - actual.minute = 60; - actual.hour = 24; - actual.day = 31; - actual.month = 12; - actual.year = 2020; - - expect.microsec = 999998; - expect.sec = 34; - expect.minute = 60; - expect.hour = 24; - expect.day = 31; - expect.month = 12; - expect.year = 2020; - EXPECT_EQ(expect, OptimizerUtils::boundValue(col, OP::LESS_THAN, Value(actual)).getDateTime()); - } -} - -} // namespace graph -} // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - return RUN_ALL_TESTS(); -} diff --git a/src/graph/optimizer/test/IndexScanRuleTest.cpp b/src/graph/optimizer/test/IndexScanRuleTest.cpp index 13776f28950..9c3d790f7b2 100644 --- a/src/graph/optimizer/test/IndexScanRuleTest.cpp +++ b/src/graph/optimizer/test/IndexScanRuleTest.cpp @@ -14,74 +14,6 @@ using nebula::graph::OptimizerUtils; namespace nebula { namespace opt { -TEST(IndexScanRuleTest, BoundValueTest) { - meta::cpp2::ColumnDef col; - IndexScanRule::FilterItems items; - { - Value begin, end; - col.set_name("col1"); - col.type.set_type(PropertyType::INT64); - // col > 1 and col < 5 - items.addItem("col1", RelationalExpression::Kind::kRelGT, Value(1L)); - items.addItem("col1", RelationalExpression::Kind::kRelLT, Value(5L)); - for (const auto& item : items.items) { - auto ret = OptimizerUtils::boundValue(item.relOP_, item.value_, col, begin, end); - ASSERT_TRUE(ret.ok()); - } - // Expect begin = 2 , end = 5; - EXPECT_EQ((Value(2L)), begin); - EXPECT_EQ((Value(5L)), end); - } - { - Value begin, end; - items.items.clear(); - col.set_name("col1"); - col.type.set_type(PropertyType::INT64); - // col > 1 and col > 6 - 
items.addItem("col1", RelationalExpression::Kind::kRelGT, Value(1L)); - items.addItem("col1", RelationalExpression::Kind::kRelGT, Value(6L)); - for (const auto& item : items.items) { - auto ret = OptimizerUtils::boundValue(item.relOP_, item.value_, col, begin, end); - ASSERT_TRUE(ret.ok()); - } - // Expect begin = 7 - EXPECT_EQ(Value(7L), begin); - EXPECT_EQ(Value(), end); - } - { - Value begin, end; - items.items.clear(); - col.set_name("col1"); - col.type.set_type(PropertyType::INT64); - // col > 1 and col >= 6 - items.addItem("col1", RelationalExpression::Kind::kRelGT, Value(1L)); - items.addItem("col1", RelationalExpression::Kind::kRelGE, Value(6L)); - for (const auto& item : items.items) { - auto ret = OptimizerUtils::boundValue(item.relOP_, item.value_, col, begin, end); - ASSERT_TRUE(ret.ok()); - } - // Expect begin = 6 - EXPECT_EQ(Value(6L), begin); - EXPECT_EQ(Value(), end); - } - { - Value begin, end; - items.items.clear(); - col.set_name("col1"); - col.type.set_type(PropertyType::INT64); - // col < 1 and col <= 6 - items.addItem("col1", RelationalExpression::Kind::kRelLT, Value(1L)); - items.addItem("col1", RelationalExpression::Kind::kRelLE, Value(6L)); - for (const auto& item : items.items) { - auto ret = OptimizerUtils::boundValue(item.relOP_, item.value_, col, begin, end); - ASSERT_TRUE(ret.ok()); - } - // Expect end = 1 - EXPECT_EQ(Value(1L), end); - EXPECT_EQ(Value(), begin); - } -} - TEST(IndexScanRuleTest, IQCtxTest) { auto* inst = std::move(IndexScanRule::kInstance).get(); auto* instance = static_cast(inst); @@ -124,8 +56,9 @@ TEST(IndexScanRuleTest, IQCtxTest) { const auto& colHints = iqctx.begin()->get_column_hints(); ASSERT_EQ("col0", colHints.begin()->get_column_name()); ASSERT_EQ(storage::cpp2::ScanType::RANGE, colHints.begin()->get_scan_type()); - ASSERT_EQ(Value(std::numeric_limits::min()), colHints.begin()->get_begin_value()); - ASSERT_EQ(Value(1L), colHints.begin()->get_end_value()); + ASSERT_EQ(1, colHints.begin()->get_end_value()); + 
ASSERT_FALSE(colHints.begin()->get_include_end()); + ASSERT_FALSE(colHints.begin()->begin_value_ref().is_set()); } // setup FilterItems col0 > 1 and col1 <= 2 and col1 > -1 and col2 > 3 @@ -152,8 +85,9 @@ TEST(IndexScanRuleTest, IQCtxTest) { auto hint = colHints[0]; ASSERT_EQ("col0", hint.get_column_name()); ASSERT_EQ(storage::cpp2::ScanType::RANGE, hint.get_scan_type()); - ASSERT_EQ(Value(2L), hint.get_begin_value()); - ASSERT_EQ(Value(std::numeric_limits::max()), hint.get_end_value()); + ASSERT_EQ(1, hint.get_begin_value()); + ASSERT_FALSE(hint.get_include_begin()); + ASSERT_FALSE(hint.end_value_ref().is_set()); } } @@ -188,8 +122,10 @@ TEST(IndexScanRuleTest, IQCtxTest) { auto hint = colHints[1]; ASSERT_EQ("col1", hint.get_column_name()); ASSERT_EQ(storage::cpp2::ScanType::RANGE, hint.get_scan_type()); - ASSERT_EQ(Value(0L), hint.get_begin_value()); - ASSERT_EQ(Value(3L), hint.get_end_value()); + ASSERT_EQ(Value(-1L), hint.get_begin_value()); + ASSERT_FALSE(hint.get_include_begin()); + ASSERT_EQ(Value(2L), hint.get_end_value()); + ASSERT_TRUE(hint.get_include_end()); } } // setup FilterItems col0 == 1 and col1 == 2 and col2 == -1 and col3 > 3 @@ -235,8 +171,8 @@ TEST(IndexScanRuleTest, IQCtxTest) { auto hint = colHints[3]; ASSERT_EQ("col3", hint.get_column_name()); ASSERT_EQ(storage::cpp2::ScanType::RANGE, hint.get_scan_type()); - ASSERT_EQ(Value(4L), hint.get_begin_value()); - ASSERT_EQ(Value(std::numeric_limits::max()), hint.get_end_value()); + ASSERT_EQ(Value(3L), hint.get_begin_value()); + ASSERT_FALSE(hint.get_include_begin()); } } } @@ -261,7 +197,8 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(std::numeric_limits::min(), *hint.begin_value_ref()); + EXPECT_FALSE(hint.begin_value_ref().is_set()); + EXPECT_FALSE(hint.get_include_end()); EXPECT_EQ(2, *hint.end_value_ref()); } { @@ -275,8 +212,9 @@ 
TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(std::numeric_limits::min(), *hint.begin_value_ref()); - EXPECT_EQ(3, *hint.end_value_ref()); + EXPECT_FALSE(hint.begin_value_ref().is_set()); + EXPECT_TRUE(hint.get_include_end()); + EXPECT_EQ(2, *hint.end_value_ref()); } { std::vector hints; @@ -289,8 +227,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(3, *hint.begin_value_ref()); - EXPECT_EQ(std::numeric_limits::max(), *hint.end_value_ref()); + EXPECT_FALSE(hint.end_value_ref().is_set()); + EXPECT_FALSE(hint.get_include_begin()); + EXPECT_EQ(2, *hint.begin_value_ref()); } { std::vector hints; @@ -303,8 +242,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); + EXPECT_FALSE(hint.end_value_ref().is_set()); + EXPECT_TRUE(hint.get_include_begin()); EXPECT_EQ(2, *hint.begin_value_ref()); - EXPECT_EQ(std::numeric_limits::max(), *hint.end_value_ref()); } { std::vector hints; @@ -318,8 +258,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(3, *hint.begin_value_ref()); - EXPECT_EQ(5, *hint.end_value_ref()); + EXPECT_EQ(2, hint.get_begin_value()); + EXPECT_FALSE(hint.get_include_begin()); + EXPECT_EQ(5, hint.get_end_value()); + EXPECT_FALSE(hint.get_include_end()); } { std::vector hints; @@ -333,8 +275,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_int", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, 
*hint.scan_type_ref()); - EXPECT_EQ(2, *hint.begin_value_ref()); - EXPECT_EQ(6, *hint.end_value_ref()); + EXPECT_EQ(2, hint.get_begin_value()); + EXPECT_TRUE(hint.get_include_begin()); + EXPECT_EQ(5, hint.get_end_value()); + EXPECT_TRUE(hint.get_include_end()); } } { @@ -373,8 +317,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_double", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(-std::numeric_limits::max(), *hint.begin_value_ref()); - EXPECT_EQ(1, *hint.end_value_ref()); + EXPECT_EQ(1.0, hint.get_end_value()); + EXPECT_FALSE(hint.get_include_end()); + EXPECT_FALSE(hint.begin_value_ref().is_set()); } { std::vector hints; @@ -387,8 +332,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_double", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(-std::numeric_limits::max(), *hint.begin_value_ref()); - EXPECT_EQ(3 + kEpsilon, *hint.end_value_ref()); + EXPECT_EQ(3.0, hint.get_end_value()); + EXPECT_TRUE(hint.get_include_end()); + EXPECT_FALSE(hint.begin_value_ref().is_set()); } { std::vector hints; @@ -401,8 +347,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_double", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(1, *hint.begin_value_ref()); - EXPECT_EQ(std::numeric_limits::max(), *hint.end_value_ref()); + EXPECT_EQ(1.0, hint.get_begin_value()); + EXPECT_TRUE(hint.get_include_begin()); + EXPECT_FALSE(hint.end_value_ref().is_set()); } { std::vector hints; @@ -416,8 +363,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_double", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(1 + kEpsilon, *hint.begin_value_ref()); - EXPECT_EQ(5 + kEpsilon, 
*hint.end_value_ref()); + EXPECT_FALSE(hint.get_include_begin()); + EXPECT_EQ(1, hint.get_begin_value()); + EXPECT_TRUE(hint.get_include_end()); + EXPECT_EQ(5, hint.get_end_value()); } } { @@ -437,7 +386,8 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_str", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ(std::string(len, '\0'), *hint.begin_value_ref()); + EXPECT_FALSE(hint.begin_value_ref().is_set()); + EXPECT_FALSE(hint.get_include_end()); EXPECT_EQ("ccc", *hint.end_value_ref()); } { @@ -451,10 +401,9 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_str", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - std::string begin = std::string(3, 'c').append(6, 0x00).append(1, 0x01); - std::string end = std::string(len, static_cast(0xFF)); - EXPECT_EQ(begin, *hint.begin_value_ref()); - EXPECT_EQ(end, *hint.end_value_ref()); + EXPECT_FALSE(hint.end_value_ref().is_set()); + EXPECT_FALSE(hint.get_include_begin()); + EXPECT_EQ("ccc", hint.get_begin_value()); } { std::vector hints; @@ -468,10 +417,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_str", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - std::string begin = std::string(3, 'a').append(6, 0x00).append(1, 0x01); - std::string end = "ccc"; - EXPECT_EQ(begin, *hint.begin_value_ref()); - EXPECT_EQ(end, *hint.end_value_ref()); + EXPECT_FALSE(hint.get_include_begin()); + EXPECT_EQ("aaa", hint.get_begin_value()); + EXPECT_FALSE(hint.get_include_end()); + EXPECT_EQ("ccc", hint.get_end_value()); } { std::vector hints; @@ -485,10 +434,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_str", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - std::string begin 
= "aaa"; - std::string end = std::string(3, 'c').append(6, 0x00).append(1, 0x01); - EXPECT_EQ(begin, *hint.begin_value_ref()); - EXPECT_EQ(end, *hint.end_value_ref()); + EXPECT_TRUE(hint.get_include_begin()); + EXPECT_EQ("aaa", hint.get_begin_value()); + EXPECT_TRUE(hint.get_include_end()); + EXPECT_EQ("ccc", hint.get_end_value()); } { std::vector hints; @@ -502,8 +451,10 @@ TEST(IndexScanRuleTest, BoundValueRangeTest) { const auto& hint = hints[0]; EXPECT_EQ("col_str", *hint.column_name_ref()); EXPECT_EQ(storage::cpp2::ScanType::RANGE, *hint.scan_type_ref()); - EXPECT_EQ("aaa", *hint.begin_value_ref()); - EXPECT_EQ("ccc", *hint.end_value_ref()); + EXPECT_TRUE(hint.get_include_begin()); + EXPECT_EQ("aaa", hint.get_begin_value()); + EXPECT_FALSE(hint.get_include_end()); + EXPECT_EQ("ccc", hint.get_end_value()); } } } diff --git a/src/graph/util/ToJson.cpp b/src/graph/util/ToJson.cpp index 529f4f3e257..e3ec2d5dcbe 100644 --- a/src/graph/util/ToJson.cpp +++ b/src/graph/util/ToJson.cpp @@ -242,6 +242,10 @@ folly::dynamic toJson(const storage::cpp2::IndexColumnHint &hints) { obj.insert("beginValue", rtrim(begin)); auto end = toJson(hints.get_end_value()); obj.insert("endValue", rtrim(end)); + auto includeBegin = toJson(hints.get_include_begin()); + obj.insert("includeBegin", includeBegin); + auto includeEnd = toJson(hints.get_include_end()); + obj.insert("includeEnd", includeEnd); return obj; } diff --git a/src/storage/exec/IndexScanNode.cpp b/src/storage/exec/IndexScanNode.cpp index 33f344844ac..41e6cf17b5a 100644 --- a/src/storage/exec/IndexScanNode.cpp +++ b/src/storage/exec/IndexScanNode.cpp @@ -120,14 +120,14 @@ QualifiedStrategy::Result RangePath::qualified(const Map& ro } auto& hint = hints_.back(); // TODO(hs.zhang): improve performance.Check include or not during build key. - if (hint.begin_value_ref().is_set()) { + if (hint.begin_value_ref().is_set() && !hint.get_begin_value().empty()) { bool ret = includeStart_ ? 
hint.get_begin_value() <= rowData.at(hint.get_column_name()) : hint.get_begin_value() < rowData.at(hint.get_column_name()); if (!ret) { return QualifiedStrategy::INCOMPATIBLE; } } - if (hint.end_value_ref().is_set()) { + if (hint.end_value_ref().is_set() && !hint.get_end_value().empty()) { bool ret = includeEnd_ ? hint.get_end_value() >= rowData.at(hint.get_column_name()) : hint.get_end_value() > rowData.at(hint.get_column_name()); if (!ret) { @@ -159,13 +159,13 @@ void RangePath::buildKey() { auto [a, b] = encodeRange(hint, fieldIter->get_type(), index, commonIndexPrefix.size()); // left will be `[a`,`(a`, or `[INF` std::string left = - hint.begin_value_ref().is_set() + (hint.begin_value_ref().is_set() && !hint.get_begin_value().empty()) ? fmt::format( "{}{}", hint.get_include_begin() ? '[' : '(', hint.get_begin_value().toString()) : "[-INF"; // left will be `b]`,`b)`, or `[INF` std::string right = - hint.end_value_ref().is_set() + (hint.end_value_ref().is_set() && !hint.get_end_value().empty()) ? fmt::format("{}{}", hint.get_end_value().toString(), hint.get_include_end() ? ']' : ')') : "INF]"; serializeString_ += fmt::format("{}={},{}", hint.get_column_name(), left, right); @@ -174,7 +174,7 @@ void RangePath::buildKey() { // If `end_value` is not set, `b` will be empty. 
So `endKey_` should append '\xFF' until // endKey_.size() > `totalKeyLength_` to indicate positive infinity prefixed with // `commonIndexPrefix` - if (!hint.end_value_ref().is_set()) { + if (!hint.end_value_ref().is_set() || hint.get_end_value().empty()) { endKey_.append(totalKeyLength_ - endKey_.size() + 1, '\xFF'); } } @@ -186,14 +186,14 @@ std::tuple RangePath::encodeRange( size_t offset) { std::string startKey, endKey; bool needCheckNullable = !nullable_.empty() && nullable_[colIndex]; - if (hint.end_value_ref().is_set()) { + if (hint.end_value_ref().is_set() && !hint.get_end_value().empty()) { includeEnd_ = hint.get_include_end(); auto tmp = encodeEndValue(hint.get_end_value(), colTypeDef, endKey, offset); if (memcmp(tmp.data(), std::string(tmp.size(), '\xFF').data(), tmp.size()) != 0) { needCheckNullable &= false; } } - if (hint.begin_value_ref().is_set()) { + if (hint.begin_value_ref().is_set() && !hint.get_begin_value().empty()) { includeStart_ = hint.get_include_begin(); encodeBeginValue(hint.get_begin_value(), colTypeDef, startKey, offset); } diff --git a/tests/tck/features/bugfix/TruncatedStringIndex.feature b/tests/tck/features/bugfix/TruncatedStringIndex.feature new file mode 100644 index 00000000000..f1f30ecccc7 --- /dev/null +++ b/tests/tck/features/bugfix/TruncatedStringIndex.feature @@ -0,0 +1,75 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. 
+Feature: Truncated string index + + Scenario: Truncated string index + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + When executing query: + """ + create tag person(name string); + create tag index p1 on person(name(3)); + """ + Then the execution should be successful + And wait 4 seconds + When executing query: + """ + insert vertex person(name) values "1":("abc1"),"2":("abc2"); + """ + Then the execution should be successful + When executing query: + """ + LOOKUP ON person WHERE person.name=="abc" + """ + Then the result should be, in any order: + | VertexID | + When executing query: + """ + LOOKUP ON person WHERE person.name=="abc" YIELD person.name + """ + Then the result should be, in any order: + | VertexID | person.name | + When executing query: + """ + match (v:person) where v.name == "abc" return v; + """ + Then the result should be, in any order, with relax comparison: + | v | + When executing query: + """ + match (v:person) where v.name >= "abc" return v; + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("1" :person{name: "abc1"}) | + | ("2" :person{name: "abc2"}) | + When executing query: + """ + match (v:person{name:"abc1"}) return v; + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("1" :person{name: "abc1"}) | + When executing query: + """ + match (v:person) where v.name>"abc" return v; + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("1" :person{name: "abc1"}) | + | ("2" :person{name: "abc2"}) | + When executing query: + """ + match (v:person) where v.name<="abc2" return v; + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("1" :person{name: "abc1"}) | + | ("2" :person{name: "abc2"}) | + Then drop the used space diff --git 
a/tests/tck/features/optimizer/IndexScanRule.feature b/tests/tck/features/optimizer/IndexScanRule.feature index 1692535266f..d44c5701d7e 100644 --- a/tests/tck/features/optimizer/IndexScanRule.feature +++ b/tests/tck/features/optimizer/IndexScanRule.feature @@ -20,25 +20,67 @@ Feature: Match index selection | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 13 | | - | 13 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 15 | | - | 15 | GetVertices | 11 | | - | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"name","beginValue":"\"Tim Duncan","endValue":"\"Yao Ming"}}} | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 10 | Project | 13 | | + | 13 | Filter | 7 | | + | 7 | Project | 6 | | + | 6 | Project | 5 | | + | 5 | Filter | 15 | | + | 15 | GetVertices | 11 | | + | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"name","beginValue":"\"Tim Duncan\"","endValue":"\"Yao Ming\"","includeBegin":"false","includeEnd":"true"}}} | + | 0 | Start | | | + When profiling query: + """ + MATCH (v:player) + WHERE v.age>30 and v.age<=40 + RETURN v + """ + Then the result should be, in any order: + | v | + | ("Amar'e Stoudemire" :player{age: 36, name: "Amar'e Stoudemire"}) | + | ("Kobe Bryant" :player{age: 40, name: "Kobe Bryant"}) | + | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | + | ("Chris Paul" :player{age: 33, name: "Chris Paul"}) | + | ("Boris Diaw" :player{age: 36, name: "Boris Diaw"}) | + | ("LeBron James" :player{age: 34, name: "LeBron James"}) | + | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | + | ("David West" :player{age: 38, name: "David West"}) | + | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | + | ("Danny Green" :player{age: 31, name: "Danny 
Green"}) | + | ("Rudy Gay" :player{age: 32, name: "Rudy Gay"}) | + | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | + | ("Stephen Curry" :player{age: 31, name: "Stephen Curry"}) | + | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | + | ("Aron Baynes" :player{age: 32, name: "Aron Baynes"}) | + | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | + | ("Rajon Rondo" :player{age: 33, name: "Rajon Rondo"}) | + | ("Carmelo Anthony" :player{age: 34, name: "Carmelo Anthony"}) | + | ("Dwyane Wade" :player{age: 37, name: "Dwyane Wade"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Dirk Nowitzki" :player{age: 40, name: "Dirk Nowitzki"}) | + | ("JaVale McGee" :player{age: 31, name: "JaVale McGee"}) | + | ("Dwight Howard" :player{age: 33, name: "Dwight Howard"}) | + And the execution plan should be: + | id | name | dependencies | operator info | + | 10 | Project | 13 | | + | 13 | Filter | 7 | | + | 7 | Project | 6 | | + | 6 | Project | 5 | | + | 5 | Filter | 15 | | + | 15 | GetVertices | 11 | | + | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"age","beginValue":"30","endValue":"40","includeBegin":"false","includeEnd":"true"}}} | + | 0 | Start | | | Scenario: or filter embeding When profiling query: """ MATCH (v:player) WHERE - v.name<="Aron Baynes" - or v.name>"Yao Ming" - or v.name=="Kobe Bryant" - or v.age>40 + v.name<="Aron Baynes" + or v.name>"Yao Ming" + or v.name=="Kobe Bryant" + or v.age>40 RETURN v """ Then the result should be, in any order: @@ -70,8 +112,8 @@ Feature: Match index selection """ MATCH (v:player)-[:like]->(n) WHERE - v.name<="Aron Baynes" - or n.age>45 + v.name<="Aron Baynes" + or n.age>45 RETURN v, n """ Then the result should be, in any order: @@ -101,11 +143,11 @@ Feature: Match index selection """ MATCH (v:player)-[:like]->(n) WHERE - v.name<="Aron Baynes" - or v.age>45 - or true - or v.age+1 - or v.name + 
v.name<="Aron Baynes" + or v.age>45 + or true + or v.age+1 + or v.name RETURN count(*) AS count """ Then the result should be, in any order: From 7f711bb2ea00b9ae1a1044c4feb53f71da58f5b1 Mon Sep 17 00:00:00 2001 From: Doodle <13706157+critical27@users.noreply.github.com> Date: Mon, 15 Nov 2021 22:50:53 -0600 Subject: [PATCH 19/53] separate thenValue/thenError when rpc fail (#3031) --- src/clients/storage/StorageClientBase-inl.h | 209 +++++++++++--------- src/clients/storage/StorageClientBase.h | 10 +- src/kvstore/raftex/Host.cpp | 57 ++++-- 3 files changed, 157 insertions(+), 119 deletions(-) diff --git a/src/clients/storage/StorageClientBase-inl.h b/src/clients/storage/StorageClientBase-inl.h index 6a3f7ac9651..79435d2252a 100644 --- a/src/clients/storage/StorageClientBase-inl.h +++ b/src/clients/storage/StorageClientBase-inl.h @@ -115,6 +115,7 @@ folly::SemiFuture> StorageClientBase::c folly::EventBase* evb, std::unordered_map requests, RemoteFunc&& remoteFunc) { + using TransportException = apache::thrift::transport::TTransportException; auto context = std::make_shared>( requests.size(), std::move(remoteFunc)); @@ -137,49 +138,64 @@ folly::SemiFuture> StorageClientBase::c // Since all requests are sent using the same eventbase, all // then-callback will be executed on the same IO thread .via(evb) - .then([this, context, host, spaceId, start](folly::Try&& val) { - if (val.hasException()) { - auto& r = context->findRequest(host); - LOG(ERROR) << "Request to " << host << " failed: " << val.exception().what(); - auto parts = getReqPartsId(r); - context->resp.appendFailedParts(parts, nebula::cpp2::ErrorCode::E_RPC_FAILURE); - invalidLeader(spaceId, parts); - context->resp.markFailure(); - } else { - auto resp = std::move(val.value()); - auto& result = resp.get_result(); - bool hasFailure{false}; - for (auto& code : result.get_failed_parts()) { - VLOG(3) << "Failure! 
Failed part " << code.get_part_id() << ", failed code " - << static_cast(code.get_code()); - hasFailure = true; - context->resp.emplaceFailedPart(code.get_part_id(), code.get_code()); - if (code.get_code() == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { - auto* leader = code.get_leader(); - if (isValidHostPtr(leader)) { - updateLeader(spaceId, code.get_part_id(), *leader); - } else { - invalidLeader(spaceId, code.get_part_id()); - } - } else if (code.get_code() == nebula::cpp2::ErrorCode::E_PART_NOT_FOUND || - code.get_code() == nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND) { - invalidLeader(spaceId, code.get_part_id()); + .thenValue([this, context, host, spaceId, start](Response&& resp) { + auto& result = resp.get_result(); + bool hasFailure{false}; + for (auto& code : result.get_failed_parts()) { + VLOG(3) << "Failure! Failed part " << code.get_part_id() << ", failed code " + << static_cast(code.get_code()); + hasFailure = true; + context->resp.emplaceFailedPart(code.get_part_id(), code.get_code()); + if (code.get_code() == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + auto* leader = code.get_leader(); + if (isValidHostPtr(leader)) { + updateLeader(spaceId, code.get_part_id(), *leader); } else { - // do nothing + invalidLeader(spaceId, code.get_part_id()); } + } else if (code.get_code() == nebula::cpp2::ErrorCode::E_PART_NOT_FOUND || + code.get_code() == nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND) { + invalidLeader(spaceId, code.get_part_id()); + } else { + // do nothing } - if (hasFailure) { - context->resp.markFailure(); - } - - // Adjust the latency - auto latency = result.get_latency_in_us(); - context->resp.setLatency(host, latency, time::WallClock::fastNowInMicroSec() - start); - - // Keep the response - context->resp.addResponse(std::move(resp)); + } + if (hasFailure) { + context->resp.markFailure(); } + // Adjust the latency + auto latency = result.get_latency_in_us(); + context->resp.setLatency(host, latency, time::WallClock::fastNowInMicroSec() - start); 
+ + // Keep the response + context->resp.addResponse(std::move(resp)); + }) + .thenError(folly::tag_t{}, + [this, context, host, spaceId](TransportException&& ex) { + auto& r = context->findRequest(host); + auto parts = getReqPartsId(r); + if (ex.getType() == TransportException::TIMED_OUT) { + LOG(ERROR) << "Request to " << host << " time out: " << ex.what(); + } else { + invalidLeader(spaceId, parts); + LOG(ERROR) << "Request to " << host << " failed: " << ex.what(); + } + context->resp.appendFailedParts(parts, + nebula::cpp2::ErrorCode::E_RPC_FAILURE); + context->resp.markFailure(); + }) + .thenError(folly::tag_t{}, + [this, context, host, spaceId](std::exception&& ex) { + auto& r = context->findRequest(host); + auto parts = getReqPartsId(r); + LOG(ERROR) << "Request to " << host << " failed: " << ex.what(); + invalidLeader(spaceId, parts); + context->resp.appendFailedParts(parts, + nebula::cpp2::ErrorCode::E_RPC_FAILURE); + context->resp.markFailure(); + }) + .ensure([context, host] { if (context->removeRequest(host)) { // Received all responses context->promise.setValue(std::move(context->resp)); @@ -199,75 +215,76 @@ folly::SemiFuture> StorageClientBase::c template template folly::Future> StorageClientBase::getResponse( - folly::EventBase* evb, - std::pair&& request, - RemoteFunc&& remoteFunc, - folly::Promise> pro) { - auto f = pro.getFuture(); - getResponseImpl(evb, - std::forward(request), - std::forward(remoteFunc), - std::move(pro)); + folly::EventBase* evb, std::pair&& request, RemoteFunc&& remoteFunc) { + auto pro = std::make_shared>>(); + auto f = pro->getFuture(); + getResponseImpl( + evb, std::forward(request), std::forward(remoteFunc), pro); return f; } template template -void StorageClientBase::getResponseImpl(folly::EventBase* evb, - std::pair request, - RemoteFunc remoteFunc, - folly::Promise> pro) { +void StorageClientBase::getResponseImpl( + folly::EventBase* evb, + std::pair request, + RemoteFunc remoteFunc, + std::shared_ptr>> pro) { + using 
TransportException = apache::thrift::transport::TTransportException; if (evb == nullptr) { DCHECK(!!ioThreadPool_); evb = ioThreadPool_->getEventBase(); } - folly::via(evb, - [evb, - request = std::move(request), - remoteFunc = std::move(remoteFunc), - pro = std::move(pro), - this]() mutable { - auto host = request.first; - auto client = clientsMan_->client(host, evb, false, FLAGS_storage_client_timeout_ms); - auto spaceId = request.second.get_space_id(); - auto partsId = getReqPartsId(request.second); - LOG(INFO) << "Send request to storage " << host; - remoteFunc(client.get(), request.second) - .via(evb) - .then([spaceId, - partsId = std::move(partsId), - p = std::move(pro), - request = std::move(request), - remoteFunc = std::move(remoteFunc), - this](folly::Try&& t) mutable { - // exception occurred during RPC - if (t.hasException()) { - p.setValue(Status::Error(folly::stringPrintf( - "RPC failure in StorageClient: %s", t.exception().what().c_str()))); - invalidLeader(spaceId, partsId); - return; - } - auto&& resp = std::move(t.value()); - // leader changed - auto& result = resp.get_result(); - for (auto& code : result.get_failed_parts()) { - VLOG(3) << "Failure! 
Failed part " << code.get_part_id() << ", failed code " - << static_cast(code.get_code()); - if (code.get_code() == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { - auto* leader = code.get_leader(); - if (isValidHostPtr(leader)) { - updateLeader(spaceId, code.get_part_id(), *leader); + folly::via( + evb, + [evb, request = std::move(request), remoteFunc = std::move(remoteFunc), pro, this]() mutable { + auto host = request.first; + auto client = clientsMan_->client(host, evb, false, FLAGS_storage_client_timeout_ms); + auto spaceId = request.second.get_space_id(); + auto partsId = getReqPartsId(request.second); + LOG(INFO) << "Send request to storage " << host; + remoteFunc(client.get(), request.second) + .via(evb) + .thenValue([spaceId, pro, this](Response&& resp) mutable { + auto& result = resp.get_result(); + for (auto& code : result.get_failed_parts()) { + VLOG(3) << "Failure! Failed part " << code.get_part_id() << ", failed code " + << static_cast(code.get_code()); + if (code.get_code() == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + auto* leader = code.get_leader(); + if (isValidHostPtr(leader)) { + updateLeader(spaceId, code.get_part_id(), *leader); + } else { + invalidLeader(spaceId, code.get_part_id()); + } + } else if (code.get_code() == nebula::cpp2::ErrorCode::E_PART_NOT_FOUND || + code.get_code() == nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND) { + invalidLeader(spaceId, code.get_part_id()); + } + } + pro->setValue(std::move(resp)); + }) + .thenError(folly::tag_t{}, + [spaceId, partsId = std::move(partsId), host, pro, this]( + TransportException&& ex) mutable { + if (ex.getType() == TransportException::TIMED_OUT) { + LOG(ERROR) << "Request to " << host << " time out: " << ex.what(); } else { - invalidLeader(spaceId, code.get_part_id()); + invalidLeader(spaceId, partsId); + LOG(ERROR) << "Request to " << host << " failed: " << ex.what(); } - } else if (code.get_code() == nebula::cpp2::ErrorCode::E_PART_NOT_FOUND || - code.get_code() == 
nebula::cpp2::ErrorCode::E_SPACE_NOT_FOUND) { - invalidLeader(spaceId, code.get_part_id()); - } - } - p.setValue(std::move(resp)); - }); - }); // via + pro->setValue(Status::Error( + folly::stringPrintf("RPC failure in StorageClient: %s", ex.what()))); + }) + .thenError(folly::tag_t{}, + [spaceId, partsId = std::move(partsId), host, pro, this]( + std::exception&& ex) mutable { + // exception occurred during RPC + pro->setValue(Status::Error( + folly::stringPrintf("RPC failure in StorageClient: %s", ex.what()))); + invalidLeader(spaceId, partsId); + }); + }); // via } template diff --git a/src/clients/storage/StorageClientBase.h b/src/clients/storage/StorageClientBase.h index a9d347d10ed..a5bc2054114 100644 --- a/src/clients/storage/StorageClientBase.h +++ b/src/clients/storage/StorageClientBase.h @@ -141,11 +141,9 @@ class StorageClientBase { class RemoteFunc, class Response = typename std::result_of::type::value_type> - folly::Future> getResponse( - folly::EventBase* evb, - std::pair&& request, - RemoteFunc&& remoteFunc, - folly::Promise> pro = folly::Promise>()); + folly::Future> getResponse(folly::EventBase* evb, + std::pair&& request, + RemoteFunc&& remoteFunc); template request, RemoteFunc remoteFunc, - folly::Promise> pro); + std::shared_ptr>> pro); // Cluster given ids into the host they belong to // The method returns a map diff --git a/src/kvstore/raftex/Host.cpp b/src/kvstore/raftex/Host.cpp index 8982f62b6c6..20d8a400050 100644 --- a/src/kvstore/raftex/Host.cpp +++ b/src/kvstore/raftex/Host.cpp @@ -155,24 +155,10 @@ void Host::setResponse(const cpp2::AppendLogResponse& r) { } void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr req) { - sendAppendLogRequest(eb, std::move(req)) + using TransportException = apache::thrift::transport::TTransportException; + sendAppendLogRequest(eb, req) .via(eb) - .then([eb, self = shared_from_this()](folly::Try&& t) { - VLOG(3) << self->idStr_ << "appendLogs() call got response"; - if (t.hasException()) { - 
VLOG(2) << self->idStr_ << t.exception().what(); - cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); - { - std::lock_guard g(self->lock_); - self->setResponse(r); - self->lastLogIdSent_ = self->logIdToSend_ - 1; - } - self->noMoreRequestCV_.notify_all(); - return; - } - - cpp2::AppendLogResponse resp = std::move(t).value(); + .thenValue([eb, self = shared_from_this()](cpp2::AppendLogResponse&& resp) { LOG_IF(INFO, FLAGS_trace_raft) << self->idStr_ << "AppendLogResponse " << "code " << apache::thrift::util::enumNameSafe(resp.get_error_code()) << ", currTerm " @@ -357,6 +343,43 @@ void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr{}, + [self = shared_from_this(), req](TransportException&& ex) { + VLOG(2) << self->idStr_ << ex.what(); + cpp2::AppendLogResponse r; + r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); + { + std::lock_guard g(self->lock_); + if (ex.getType() == TransportException::TIMED_OUT) { + VLOG(2) << self->idStr_ << "append log time out" + << ", space " << req->get_space() << ", part " << req->get_part() + << ", current term " << req->get_current_term() << ", last_log_id " + << req->get_last_log_id() << ", committed_id " + << req->get_committed_log_id() << ", last_log_term_sent" + << req->get_last_log_term_sent() << ", last_log_id_sent " + << req->get_last_log_id_sent() << ", logs size " + << req->get_log_str_list().size(); + } + self->setResponse(r); + self->lastLogIdSent_ = self->logIdToSend_ - 1; + } + // a new raft log or heartbeat will trigger another appendLogs in Host + self->noMoreRequestCV_.notify_all(); + return; + }) + .thenError(folly::tag_t{}, [self = shared_from_this()](std::exception&& ex) { + VLOG(2) << self->idStr_ << ex.what(); + cpp2::AppendLogResponse r; + r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); + { + std::lock_guard g(self->lock_); + self->setResponse(r); + self->lastLogIdSent_ = self->logIdToSend_ - 1; + } + // a new raft log or heartbeat will trigger another appendLogs in Host + 
self->noMoreRequestCV_.notify_all(); + return; }); } From b92c65eedf4cdca1bf79a024cdb4f8a359745dd9 Mon Sep 17 00:00:00 2001 From: liwenhui-soul <38217397+liwenhui-soul@users.noreply.github.com> Date: Wed, 17 Nov 2021 09:40:01 +0800 Subject: [PATCH 20/53] move balancer to job manager (#3254) Co-authored-by: Doodle <13706157+critical27@users.noreply.github.com> --- src/clients/meta/MetaClient.cpp | 51 - src/clients/meta/MetaClient.h | 9 - src/common/datatypes/HostAddr.cpp | 6 + src/common/datatypes/HostAddr.h | 17 + src/common/datatypes/test/CMakeLists.txt | 1 + src/common/expression/test/CMakeLists.txt | 2 +- src/common/http/test/CMakeLists.txt | 2 + src/common/stats/test/CMakeLists.txt | 8 + src/common/utils/MetaKeyUtils.cpp | 45 +- src/common/utils/MetaKeyUtils.h | 14 +- src/graph/executor/CMakeLists.txt | 5 - src/graph/executor/Executor.cpp | 20 - src/graph/executor/admin/BalanceExecutor.cpp | 37 - src/graph/executor/admin/BalanceExecutor.h | 29 - .../executor/admin/BalanceLeadersExecutor.cpp | 34 - .../executor/admin/BalanceLeadersExecutor.h | 29 - .../executor/admin/ResetBalanceExecutor.cpp | 36 - .../executor/admin/ResetBalanceExecutor.h | 29 - .../executor/admin/ShowBalanceExecutor.cpp | 69 - .../executor/admin/ShowBalanceExecutor.h | 29 - .../executor/admin/StopBalanceExecutor.cpp | 36 - .../executor/admin/StopBalanceExecutor.h | 29 - .../executor/admin/SubmitJobExecutor.cpp | 88 +- src/graph/executor/admin/SubmitJobExecutor.h | 2 + src/graph/planner/plan/Admin.cpp | 12 - src/graph/planner/plan/Admin.h | 67 - src/graph/planner/plan/PlanNode.cpp | 10 - src/graph/planner/plan/PlanNode.h | 5 - src/graph/service/PermissionCheck.cpp | 1 - src/graph/validator/AdminJobValidator.h | 3 +- src/graph/validator/BalanceValidator.cpp | 53 - src/graph/validator/BalanceValidator.h | 30 - src/graph/validator/CMakeLists.txt | 1 - src/graph/validator/Validator.cpp | 3 - src/interface/meta.thrift | 28 +- src/kvstore/plugins/hbase/test/CMakeLists.txt | 4 + 
src/meta/CMakeLists.txt | 7 +- src/meta/MetaServiceHandler.cpp | 13 - src/meta/MetaServiceHandler.h | 4 - .../processors/admin/BalanceProcessor.cpp | 128 -- src/meta/processors/admin/BalanceProcessor.h | 33 - src/meta/processors/admin/Balancer.cpp | 1232 ----------------- src/meta/processors/admin/Balancer.h | 269 ---- .../admin/LeaderBalanceProcessor.cpp | 20 - .../processors/admin/LeaderBalanceProcessor.h | 32 - src/meta/processors/job/AdminJobProcessor.cpp | 9 +- .../processors/job/BalanceJobExecutor.cpp | 1207 +++++++++++++++- src/meta/processors/job/BalanceJobExecutor.h | 204 ++- .../processors/{admin => job}/BalancePlan.cpp | 90 +- .../processors/{admin => job}/BalancePlan.h | 30 +- .../processors/{admin => job}/BalanceTask.cpp | 10 +- .../processors/{admin => job}/BalanceTask.h | 27 +- src/meta/processors/job/JobDescription.cpp | 4 +- src/meta/processors/job/JobDescription.h | 2 +- src/meta/processors/job/JobManager.cpp | 157 ++- src/meta/processors/job/JobManager.h | 22 +- src/meta/processors/job/MetaJobExecutor.cpp | 41 +- src/meta/processors/job/MetaJobExecutor.h | 17 +- src/meta/test/AdminClientTest.cpp | 1 - src/meta/test/BalanceIntegrationTest.cpp | 1 - src/meta/test/BalancerTest.cpp | 454 +++--- src/meta/test/GetStatsTest.cpp | 4 +- src/meta/test/JobManagerTest.cpp | 36 +- src/parser/AdminSentences.cpp | 52 +- src/parser/AdminSentences.h | 43 - src/parser/Sentence.h | 1 - src/parser/parser.yy | 58 +- src/parser/test/ParserTest.cpp | 18 +- src/webservice/test/CMakeLists.txt | 8 + 69 files changed, 2168 insertions(+), 2910 deletions(-) delete mode 100644 src/graph/executor/admin/BalanceExecutor.cpp delete mode 100644 src/graph/executor/admin/BalanceExecutor.h delete mode 100644 src/graph/executor/admin/BalanceLeadersExecutor.cpp delete mode 100644 src/graph/executor/admin/BalanceLeadersExecutor.h delete mode 100644 src/graph/executor/admin/ResetBalanceExecutor.cpp delete mode 100644 src/graph/executor/admin/ResetBalanceExecutor.h delete mode 100644 
src/graph/executor/admin/ShowBalanceExecutor.cpp delete mode 100644 src/graph/executor/admin/ShowBalanceExecutor.h delete mode 100644 src/graph/executor/admin/StopBalanceExecutor.cpp delete mode 100644 src/graph/executor/admin/StopBalanceExecutor.h delete mode 100644 src/graph/validator/BalanceValidator.cpp delete mode 100644 src/graph/validator/BalanceValidator.h delete mode 100644 src/meta/processors/admin/BalanceProcessor.cpp delete mode 100644 src/meta/processors/admin/BalanceProcessor.h delete mode 100644 src/meta/processors/admin/Balancer.cpp delete mode 100644 src/meta/processors/admin/Balancer.h delete mode 100644 src/meta/processors/admin/LeaderBalanceProcessor.cpp delete mode 100644 src/meta/processors/admin/LeaderBalanceProcessor.h rename src/meta/processors/{admin => job}/BalancePlan.cpp (69%) rename src/meta/processors/{admin => job}/BalancePlan.h (66%) rename src/meta/processors/{admin => job}/BalanceTask.cpp (96%) rename src/meta/processors/{admin => job}/BalanceTask.h (80%) diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp index a6c709deae6..8953947e0cb 100644 --- a/src/clients/meta/MetaClient.cpp +++ b/src/clients/meta/MetaClient.cpp @@ -2579,57 +2579,6 @@ folly::Future>> MetaClient::getUserRoles(st return future; } -folly::Future> MetaClient::balance(std::vector hostDel, - bool isStop, - bool isReset) { - cpp2::BalanceReq req; - if (!hostDel.empty()) { - req.set_host_del(std::move(hostDel)); - } - if (isStop) { - req.set_stop(isStop); - } - if (isReset) { - req.set_reset(isReset); - } - - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_balance(request); }, - [](cpp2::BalanceResp&& resp) -> int64_t { return resp.get_id(); }, - std::move(promise)); - return future; -} - -folly::Future>> MetaClient::showBalance(int64_t balanceId) { - cpp2::BalanceReq req; - req.set_id(balanceId); - folly::Promise>> promise; - auto future 
= promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_balance(request); }, - [](cpp2::BalanceResp&& resp) -> std::vector { return resp.get_tasks(); }, - std::move(promise)); - return future; -} - -folly::Future> MetaClient::balanceLeader() { - cpp2::LeaderBalanceReq req; - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_leaderBalance(request); }, - [](cpp2::ExecResp&& resp) -> bool { - return resp.get_code() == nebula::cpp2::ErrorCode::SUCCEEDED; - }, - std::move(promise)); - return future; -} - folly::Future> MetaClient::getTagDefaultValue(GraphSpaceID spaceId, TagID tagId, const std::string& field) { diff --git a/src/clients/meta/MetaClient.h b/src/clients/meta/MetaClient.h index 445cabdebf0..a80204f9609 100644 --- a/src/clients/meta/MetaClient.h +++ b/src/clients/meta/MetaClient.h @@ -391,15 +391,6 @@ class MetaClient { folly::Future>> getUserRoles(std::string account); - // Operations for admin - folly::Future> balance(std::vector hostDel, - bool isStop, - bool isReset); - - folly::Future>> showBalance(int64_t balanceId); - - folly::Future> balanceLeader(); - // Operations for config folly::Future> regConfig(const std::vector& items); diff --git a/src/common/datatypes/HostAddr.cpp b/src/common/datatypes/HostAddr.cpp index 5a7ce7faf94..930ef89395c 100644 --- a/src/common/datatypes/HostAddr.cpp +++ b/src/common/datatypes/HostAddr.cpp @@ -17,6 +17,12 @@ bool HostAddr::operator==(const HostAddr& rhs) const { bool HostAddr::operator!=(const HostAddr& rhs) const { return !(*this == rhs); } +HostAddr& HostAddr::operator=(const HostAddr& rhs) { + host = rhs.host; + port = rhs.port; + return *this; +} + bool HostAddr::operator<(const HostAddr& rhs) const { if (host == rhs.host) { return port < rhs.port; diff --git a/src/common/datatypes/HostAddr.h b/src/common/datatypes/HostAddr.h index a6cfa3c1ba4..69316691191 
100644 --- a/src/common/datatypes/HostAddr.h +++ b/src/common/datatypes/HostAddr.h @@ -8,6 +8,7 @@ #include +#include "common/base/Logging.h" #include "common/thrift/ThriftTypes.h" namespace nebula { @@ -25,6 +26,8 @@ struct HostAddr { * */ HostAddr(int h, int p) = delete; HostAddr(std::string h, Port p) : host(std::move(h)), port(p) {} + HostAddr(const HostAddr& other) : host(other.host), port(other.port) {} + HostAddr(HostAddr&& other) : host(std::move(other.host)), port(std::move(other.port)) {} void clear() { host.clear(); @@ -40,11 +43,25 @@ struct HostAddr { return os.str(); } + HostAddr& operator=(const HostAddr& rhs); + bool operator==(const HostAddr& rhs) const; bool operator!=(const HostAddr& rhs) const; bool operator<(const HostAddr& rhs) const; + + static HostAddr fromString(const std::string& str) { + HostAddr ha; + auto pos = str.find(":"); + if (pos == std::string::npos) { + LOG(ERROR) << "HostAddr: parse string error"; + return ha; + } + ha.host = str.substr(1, pos - 2); + ha.port = std::stoi(str.substr(pos + 1)); + return ha; + } }; inline std::ostream& operator<<(std::ostream& os, const HostAddr& addr) { diff --git a/src/common/datatypes/test/CMakeLists.txt b/src/common/datatypes/test/CMakeLists.txt index 92d157bd15c..2ec47fd1962 100644 --- a/src/common/datatypes/test/CMakeLists.txt +++ b/src/common/datatypes/test/CMakeLists.txt @@ -45,6 +45,7 @@ nebula_add_test( $ $ $ + $ LIBRARIES gtest ${THRIFT_LIBRARIES} diff --git a/src/common/expression/test/CMakeLists.txt b/src/common/expression/test/CMakeLists.txt index a58a3341f66..6781b5da6d3 100644 --- a/src/common/expression/test/CMakeLists.txt +++ b/src/common/expression/test/CMakeLists.txt @@ -9,11 +9,11 @@ set(expression_test_common_libs $ $ $ + $ $ $ $ $ - $ $ $ $ diff --git a/src/common/http/test/CMakeLists.txt b/src/common/http/test/CMakeLists.txt index b8d8a602a6d..31f3ce50301 100644 --- a/src/common/http/test/CMakeLists.txt +++ b/src/common/http/test/CMakeLists.txt @@ -17,6 +17,8 @@ 
nebula_add_test( $ $ $ + $ + $ LIBRARIES ${PROXYGEN_LIBRARIES} gtest diff --git a/src/common/stats/test/CMakeLists.txt b/src/common/stats/test/CMakeLists.txt index 36876b2a6b9..8acaed18fcc 100644 --- a/src/common/stats/test/CMakeLists.txt +++ b/src/common/stats/test/CMakeLists.txt @@ -9,9 +9,11 @@ nebula_add_test( StatsManagerTest.cpp OBJECTS $ + $ $ $ $ + $ LIBRARIES gtest ) @@ -23,9 +25,11 @@ nebula_add_test( StatsManagerRateTest.cpp OBJECTS $ + $ $ $ $ + $ LIBRARIES gtest ) @@ -37,9 +41,11 @@ nebula_add_test( StatsManagerCrossLevelTest.cpp OBJECTS $ + $ $ $ $ + $ LIBRARIES gtest ) @@ -52,8 +58,10 @@ nebula_add_executable( StatsManagerBenchmark.cpp OBJECTS $ + $ $ $ + $ LIBRARIES follybenchmark boost_regex ) diff --git a/src/common/utils/MetaKeyUtils.cpp b/src/common/utils/MetaKeyUtils.cpp index a5ace5827d7..92dfa0ea400 100644 --- a/src/common/utils/MetaKeyUtils.cpp +++ b/src/common/utils/MetaKeyUtils.cpp @@ -876,11 +876,11 @@ std::string MetaKeyUtils::genTimestampStr() { std::string MetaKeyUtils::idKey() { return kIdKey; } std::string MetaKeyUtils::balanceTaskKey( - BalanceID balanceId, GraphSpaceID spaceId, PartitionID partId, HostAddr src, HostAddr dst) { + JobID jobId, GraphSpaceID spaceId, PartitionID partId, HostAddr src, HostAddr dst) { std::string str; str.reserve(64); str.append(reinterpret_cast(kBalanceTaskTable.data()), kBalanceTaskTable.size()) - .append(reinterpret_cast(&balanceId), sizeof(BalanceID)) + .append(reinterpret_cast(&jobId), sizeof(JobID)) .append(reinterpret_cast(&spaceId), sizeof(GraphSpaceID)) .append(reinterpret_cast(&partId), sizeof(PartitionID)) .append(serializeHostAddr(src)) @@ -901,39 +901,19 @@ std::string MetaKeyUtils::balanceTaskVal(BalanceTaskStatus status, return val; } -std::string MetaKeyUtils::balanceTaskPrefix(BalanceID balanceId) { +std::string MetaKeyUtils::balanceTaskPrefix(JobID jobId) { std::string prefix; prefix.reserve(32); prefix.append(reinterpret_cast(kBalanceTaskTable.data()), kBalanceTaskTable.size()) - 
.append(reinterpret_cast(&balanceId), sizeof(BalanceID)); + .append(reinterpret_cast(&jobId), sizeof(JobID)); return prefix; } -std::string MetaKeyUtils::balancePlanKey(BalanceID id) { - CHECK_GE(id, 0); - // make the balance id is stored in decend order - auto encode = folly::Endian::big(std::numeric_limits::max() - id); - std::string key; - key.reserve(sizeof(BalanceID) + kBalancePlanTable.size()); - key.append(reinterpret_cast(kBalancePlanTable.data()), kBalancePlanTable.size()) - .append(reinterpret_cast(&encode), sizeof(BalanceID)); - return key; -} - -std::string MetaKeyUtils::balancePlanVal(BalanceStatus status) { - std::string val; - val.reserve(sizeof(BalanceStatus)); - val.append(reinterpret_cast(&status), sizeof(BalanceStatus)); - return val; -} - -std::string MetaKeyUtils::balancePlanPrefix() { return kBalancePlanTable; } - std::tuple MetaKeyUtils::parseBalanceTaskKey(const folly::StringPiece& rawKey) { uint32_t offset = kBalanceTaskTable.size(); - auto balanceId = *reinterpret_cast(rawKey.begin() + offset); - offset += sizeof(balanceId); + auto jobId = *reinterpret_cast(rawKey.begin() + offset); + offset += sizeof(jobId); auto spaceId = *reinterpret_cast(rawKey.begin() + offset); offset += sizeof(GraphSpaceID); auto partId = *reinterpret_cast(rawKey.begin() + offset); @@ -941,7 +921,7 @@ MetaKeyUtils::parseBalanceTaskKey(const folly::StringPiece& rawKey) { auto src = MetaKeyUtils::deserializeHostAddr({rawKey, offset}); offset += src.host.size() + sizeof(size_t) + sizeof(uint32_t); auto dst = MetaKeyUtils::deserializeHostAddr({rawKey, offset}); - return std::make_tuple(balanceId, spaceId, partId, src, dst); + return std::make_tuple(jobId, spaceId, partId, src, dst); } std::tuple @@ -964,17 +944,6 @@ std::string MetaKeyUtils::groupKey(const std::string& group) { return key; } -BalanceID MetaKeyUtils::parseBalanceID(const folly::StringPiece& rawKey) { - auto decode = *reinterpret_cast(rawKey.begin() + kBalancePlanTable.size()); - auto id = 
std::numeric_limits::max() - folly::Endian::big(decode); - CHECK_GE(id, 0); - return id; -} - -BalanceStatus MetaKeyUtils::parseBalanceStatus(const folly::StringPiece& rawVal) { - return static_cast(*rawVal.begin()); -} - std::string MetaKeyUtils::groupVal(const std::vector& zones) { return folly::join(",", zones); } diff --git a/src/common/utils/MetaKeyUtils.h b/src/common/utils/MetaKeyUtils.h index 596280ae0e2..49e5b794df1 100644 --- a/src/common/utils/MetaKeyUtils.h +++ b/src/common/utils/MetaKeyUtils.h @@ -265,24 +265,14 @@ class MetaKeyUtils final { static HostAddr deserializeHostAddr(folly::StringPiece str); static std::string balanceTaskKey( - BalanceID balanceId, GraphSpaceID spaceId, PartitionID partId, HostAddr src, HostAddr dst); + JobID jobId, GraphSpaceID spaceId, PartitionID partId, HostAddr src, HostAddr dst); static std::string balanceTaskVal(BalanceTaskStatus status, BalanceTaskResult retult, int64_t startTime, int64_t endTime); - static std::string balanceTaskPrefix(BalanceID balanceId); - - static std::string balancePlanKey(BalanceID id); - - static std::string balancePlanVal(BalanceStatus status); - - static std::string balancePlanPrefix(); - - static BalanceID parseBalanceID(const folly::StringPiece& rawKey); - - static BalanceStatus parseBalanceStatus(const folly::StringPiece& rawVal); + static std::string balanceTaskPrefix(JobID jobId); static std::tuple parseBalanceTaskKey( const folly::StringPiece& rawKey); diff --git a/src/graph/executor/CMakeLists.txt b/src/graph/executor/CMakeLists.txt index a37e82e309f..a1649d87ff6 100644 --- a/src/graph/executor/CMakeLists.txt +++ b/src/graph/executor/CMakeLists.txt @@ -51,11 +51,6 @@ nebula_add_library( admin/ListUsersExecutor.cpp admin/ListRolesExecutor.cpp admin/SubmitJobExecutor.cpp - admin/BalanceExecutor.cpp - admin/StopBalanceExecutor.cpp - admin/ResetBalanceExecutor.cpp - admin/BalanceLeadersExecutor.cpp - admin/ShowBalanceExecutor.cpp admin/ShowHostsExecutor.cpp 
admin/ShowMetaLeaderExecutor.cpp admin/SpaceExecutor.cpp diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index fa515f26212..db8178cea46 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -16,8 +16,6 @@ #include "graph/context/ExecutionContext.h" #include "graph/context/QueryContext.h" #include "graph/executor/ExecutionError.h" -#include "graph/executor/admin/BalanceExecutor.h" -#include "graph/executor/admin/BalanceLeadersExecutor.h" #include "graph/executor/admin/ChangePasswordExecutor.h" #include "graph/executor/admin/CharsetExecutor.h" #include "graph/executor/admin/ConfigExecutor.h" @@ -33,10 +31,8 @@ #include "graph/executor/admin/ListUsersExecutor.h" #include "graph/executor/admin/ListenerExecutor.h" #include "graph/executor/admin/PartExecutor.h" -#include "graph/executor/admin/ResetBalanceExecutor.h" #include "graph/executor/admin/RevokeRoleExecutor.h" #include "graph/executor/admin/SessionExecutor.h" -#include "graph/executor/admin/ShowBalanceExecutor.h" #include "graph/executor/admin/ShowHostsExecutor.h" #include "graph/executor/admin/ShowMetaLeaderExecutor.h" #include "graph/executor/admin/ShowQueriesExecutor.h" @@ -46,7 +42,6 @@ #include "graph/executor/admin/SignOutTSServiceExecutor.h" #include "graph/executor/admin/SnapshotExecutor.h" #include "graph/executor/admin/SpaceExecutor.h" -#include "graph/executor/admin/StopBalanceExecutor.h" #include "graph/executor/admin/SubmitJobExecutor.h" #include "graph/executor/admin/SwitchSpaceExecutor.h" #include "graph/executor/admin/UpdateUserExecutor.h" @@ -389,21 +384,6 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kListRoles: { return pool->add(new ListRolesExecutor(node, qctx)); } - case PlanNode::Kind::kBalanceLeaders: { - return pool->add(new BalanceLeadersExecutor(node, qctx)); - } - case PlanNode::Kind::kBalance: { - return pool->add(new BalanceExecutor(node, qctx)); - } - case 
PlanNode::Kind::kStopBalance: { - return pool->add(new StopBalanceExecutor(node, qctx)); - } - case PlanNode::Kind::kResetBalance: { - return pool->add(new ResetBalanceExecutor(node, qctx)); - } - case PlanNode::Kind::kShowBalance: { - return pool->add(new ShowBalanceExecutor(node, qctx)); - } case PlanNode::Kind::kShowConfigs: { return pool->add(new ShowConfigsExecutor(node, qctx)); } diff --git a/src/graph/executor/admin/BalanceExecutor.cpp b/src/graph/executor/admin/BalanceExecutor.cpp deleted file mode 100644 index e7bddb85c4a..00000000000 --- a/src/graph/executor/admin/BalanceExecutor.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/executor/admin/BalanceExecutor.h" - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future BalanceExecutor::execute() { - SCOPED_TIMER(&execTime_); - return balance(); -} - -folly::Future BalanceExecutor::balance() { - auto *bNode = asNode(node()); - return qctx() - ->getMetaClient() - ->balance(bNode->deleteHosts(), false, false) - .via(runner()) - .thenValue([this](StatusOr resp) { - SCOPED_TIMER(&execTime_); - if (!resp.ok()) { - LOG(ERROR) << resp.status(); - return resp.status(); - } - DataSet v({"ID"}); - v.emplace_back(Row({resp.value()})); - return finish(std::move(v)); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/BalanceExecutor.h b/src/graph/executor/admin/BalanceExecutor.h deleted file mode 100644 index 72878d8effb..00000000000 --- a/src/graph/executor/admin/BalanceExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef GRAPH_EXECUTOR_ADMIN_BALANCEEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_BALANCEEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class BalanceExecutor final : public Executor { - public: - BalanceExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("BalanceExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future balance(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_BALANCEEXECUTOR_H_ diff --git a/src/graph/executor/admin/BalanceLeadersExecutor.cpp b/src/graph/executor/admin/BalanceLeadersExecutor.cpp deleted file mode 100644 index 48182a2b4de..00000000000 --- a/src/graph/executor/admin/BalanceLeadersExecutor.cpp +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/executor/admin/BalanceLeadersExecutor.h" - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future BalanceLeadersExecutor::execute() { - SCOPED_TIMER(&execTime_); - return balanceLeaders(); -} - -folly::Future BalanceLeadersExecutor::balanceLeaders() { - return qctx()->getMetaClient()->balanceLeader().via(runner()).thenValue( - [this](StatusOr resp) { - SCOPED_TIMER(&execTime_); - if (!resp.ok()) { - LOG(ERROR) << resp.status(); - return resp.status(); - } - if (!resp.value()) { - return Status::Error("Balance leaders failed"); - } - return Status::OK(); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/BalanceLeadersExecutor.h b/src/graph/executor/admin/BalanceLeadersExecutor.h deleted file mode 100644 index 02a4d215cbc..00000000000 --- a/src/graph/executor/admin/BalanceLeadersExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. 
- * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class BalanceLeadersExecutor final : public Executor { - public: - BalanceLeadersExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("BaanceLeadersExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future balanceLeaders(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ diff --git a/src/graph/executor/admin/ResetBalanceExecutor.cpp b/src/graph/executor/admin/ResetBalanceExecutor.cpp deleted file mode 100644 index f461b6de8b5..00000000000 --- a/src/graph/executor/admin/ResetBalanceExecutor.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "graph/executor/admin/ResetBalanceExecutor.h" - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future ResetBalanceExecutor::execute() { - SCOPED_TIMER(&execTime_); - return resetBalance(); -} - -folly::Future ResetBalanceExecutor::resetBalance() { - return qctx() - ->getMetaClient() - ->balance({}, false, true) - .via(runner()) - .thenValue([this](StatusOr resp) { - SCOPED_TIMER(&execTime_); - if (!resp.ok()) { - LOG(ERROR) << resp.status(); - return resp.status(); - } - DataSet v({"ID"}); - v.emplace_back(Row({resp.value()})); - return finish(std::move(v)); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/ResetBalanceExecutor.h b/src/graph/executor/admin/ResetBalanceExecutor.h deleted file mode 100644 index 4b2d69f6fb2..00000000000 --- a/src/graph/executor/admin/ResetBalanceExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef GRAPH_EXECUTOR_ADMIN_RESETBALANCEEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_RESETBALANCEEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class ResetBalanceExecutor final : public Executor { - public: - ResetBalanceExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("ResetBalanceExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future resetBalance(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_RESETBALANCEEXECUTOR_H_ diff --git a/src/graph/executor/admin/ShowBalanceExecutor.cpp b/src/graph/executor/admin/ShowBalanceExecutor.cpp deleted file mode 100644 index 3df7ae08021..00000000000 --- a/src/graph/executor/admin/ShowBalanceExecutor.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. 
- * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/executor/admin/ShowBalanceExecutor.h" - -#include - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future ShowBalanceExecutor::execute() { - SCOPED_TIMER(&execTime_); - return showBalance(); -} - -folly::Future ShowBalanceExecutor::showBalance() { - auto *sbNode = asNode(node()); - return qctx() - ->getMetaClient() - ->showBalance(sbNode->jobId()) - .via(runner()) - .thenValue([this](StatusOr> resp) { - SCOPED_TIMER(&execTime_); - if (!resp.ok()) { - LOG(ERROR) << resp.status(); - return std::move(resp).status(); - } - auto tasks = std::move(resp).value(); - // TODO(shylock) typed items instead binary - // E.G. "balanceId", "spaceId", "partId", "from", "to" - uint32_t total = tasks.size(), succeeded = 0, failed = 0, inProgress = 0, invalid = 0; - DataSet v({"balanceId, spaceId:partId, src->dst", "status"}); - for (auto &task : tasks) { - switch (task.get_result()) { - case meta::cpp2::TaskResult::FAILED: - ++failed; - break; - case meta::cpp2::TaskResult::IN_PROGRESS: - ++inProgress; - break; - case meta::cpp2::TaskResult::INVALID: - ++invalid; - break; - case meta::cpp2::TaskResult::SUCCEEDED: - ++succeeded; - break; - } - v.emplace_back(Row( - {std::move(task).get_id(), apache::thrift::util::enumNameSafe(task.get_result())})); - } - double percentage = total == 0 ? 
0 : static_cast(succeeded) / total * 100; - v.emplace_back(Row({folly::sformat("Total:{}, Succeeded:{}, Failed:{}, " - "In Progress:{}, Invalid:{}", - total, - succeeded, - failed, - inProgress, - invalid), - percentage})); - return finish(std::move(v)); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/ShowBalanceExecutor.h b/src/graph/executor/admin/ShowBalanceExecutor.h deleted file mode 100644 index dfb89df47fc..00000000000 --- a/src/graph/executor/admin/ShowBalanceExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef GRAPH_EXECUTOR_ADMIN_SHOWBALANCEEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_SHOWBALANCEEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class ShowBalanceExecutor final : public Executor { - public: - ShowBalanceExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("ShowBalanceExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future showBalance(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_SHOWBALANCEEXECUTOR_H_ diff --git a/src/graph/executor/admin/StopBalanceExecutor.cpp b/src/graph/executor/admin/StopBalanceExecutor.cpp deleted file mode 100644 index b727032938c..00000000000 --- a/src/graph/executor/admin/StopBalanceExecutor.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "graph/executor/admin/StopBalanceExecutor.h" - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future StopBalanceExecutor::execute() { - SCOPED_TIMER(&execTime_); - return stopBalance(); -} - -folly::Future StopBalanceExecutor::stopBalance() { - return qctx() - ->getMetaClient() - ->balance({}, true, false) - .via(runner()) - .thenValue([this](StatusOr resp) { - SCOPED_TIMER(&execTime_); - if (!resp.ok()) { - LOG(ERROR) << resp.status(); - return resp.status(); - } - DataSet v({"ID"}); - v.emplace_back(Row({resp.value()})); - return finish(std::move(v)); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/StopBalanceExecutor.h b/src/graph/executor/admin/StopBalanceExecutor.h deleted file mode 100644 index 6b79b6f9181..00000000000 --- a/src/graph/executor/admin/StopBalanceExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef GRAPH_EXECUTOR_ADMIN_STOPBALANCEEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_STOPBALANCEEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class StopBalanceExecutor final : public Executor { - public: - StopBalanceExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("StopBalanceExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future stopBalance(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_STOPBALANCEEXECUTOR_H_ diff --git a/src/graph/executor/admin/SubmitJobExecutor.cpp b/src/graph/executor/admin/SubmitJobExecutor.cpp index e0997ba61f0..64946b394b8 100644 --- a/src/graph/executor/admin/SubmitJobExecutor.cpp +++ b/src/graph/executor/admin/SubmitJobExecutor.cpp @@ -62,7 +62,6 @@ StatusOr SubmitJobExecutor::buildResult(meta::cpp2::AdminJobOp jobOp, return v; } case meta::cpp2::AdminJobOp::SHOW: { - nebula::DataSet v({"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"}); DCHECK(resp.job_desc_ref().has_value()); if (!resp.job_desc_ref().has_value()) { return Status::Error("Response unexpected."); @@ -72,26 +71,7 @@ StatusOr SubmitJobExecutor::buildResult(meta::cpp2::AdminJobOp jobOp, return Status::Error("Response unexpected"); } auto &jobDesc = *resp.job_desc_ref(); - // job desc - v.emplace_back(nebula::Row({ - jobDesc.front().get_id(), - apache::thrift::util::enumNameSafe(jobDesc.front().get_cmd()), - apache::thrift::util::enumNameSafe(jobDesc.front().get_status()), - convertJobTimestampToDateTime(jobDesc.front().get_start_time()), - convertJobTimestampToDateTime(jobDesc.front().get_stop_time()), - })); - // tasks desc - auto &tasksDesc = *resp.get_task_desc(); - for (const auto &taskDesc : tasksDesc) { - v.emplace_back(nebula::Row({ - taskDesc.get_task_id(), - taskDesc.get_host().host, - apache::thrift::util::enumNameSafe(taskDesc.get_status()), - 
convertJobTimestampToDateTime(taskDesc.get_start_time()), - convertJobTimestampToDateTime(taskDesc.get_stop_time()), - })); - } - return v; + return buildShowResultData(jobDesc.front(), *resp.get_task_desc()); } case meta::cpp2::AdminJobOp::SHOW_All: { nebula::DataSet v({"Job Id", "Command", "Status", "Start Time", "Stop Time"}); @@ -126,5 +106,71 @@ Value SubmitJobExecutor::convertJobTimestampToDateTime(int64_t timestamp) { return timestamp > 0 ? Value(time::TimeConversion::unixSecondsToDateTime(timestamp)) : Value::kEmpty; } + +nebula::DataSet SubmitJobExecutor::buildShowResultData( + const nebula::meta::cpp2::JobDesc &jd, const std::vector &td) { + if (jd.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE) { + nebula::DataSet v( + {"Job Id(spaceId:partId)", "Command(src->dst)", "Status", "Start Time", "Stop Time"}); + const auto ¶s = jd.get_paras(); + size_t index = std::stoul(paras.back()); + uint32_t total = paras.size() - index - 1, succeeded = 0, failed = 0, inProgress = 0, + invalid = 0; + v.emplace_back(Row({jd.get_id(), + apache::thrift::util::enumNameSafe(jd.get_cmd()), + apache::thrift::util::enumNameSafe(jd.get_status()), + convertJobTimestampToDateTime(jd.get_start_time()).toString(), + convertJobTimestampToDateTime(jd.get_stop_time()).toString()})); + for (size_t i = index; i < paras.size() - 1; i++) { + meta::cpp2::BalanceTask tsk; + apache::thrift::CompactSerializer::deserialize(paras[i], tsk); + switch (tsk.get_result()) { + case meta::cpp2::TaskResult::FAILED: + ++failed; + break; + case meta::cpp2::TaskResult::IN_PROGRESS: + ++inProgress; + break; + case meta::cpp2::TaskResult::INVALID: + ++invalid; + break; + case meta::cpp2::TaskResult::SUCCEEDED: + ++succeeded; + break; + } + v.emplace_back(Row({std::move(tsk).get_id(), + std::move(tsk).get_command(), + apache::thrift::util::enumNameSafe(tsk.get_result()), + convertJobTimestampToDateTime(std::move(tsk).get_start_time()), + convertJobTimestampToDateTime(std::move(tsk).get_stop_time())})); + } + 
v.emplace_back(Row({folly::sformat("Total:{}", total), + folly::sformat("Succeeded:{}", succeeded), + folly::sformat("Failed:{}", failed), + folly::sformat("In Progress:{}", inProgress), + folly::sformat("Invalid:{}", invalid)})); + return v; + } else { + nebula::DataSet v({"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"}); + v.emplace_back(nebula::Row({ + jd.get_id(), + apache::thrift::util::enumNameSafe(jd.get_cmd()), + apache::thrift::util::enumNameSafe(jd.get_status()), + convertJobTimestampToDateTime(jd.get_start_time()), + convertJobTimestampToDateTime(jd.get_stop_time()), + })); + // tasks desc + for (const auto &taskDesc : td) { + v.emplace_back(nebula::Row({ + taskDesc.get_task_id(), + taskDesc.get_host().host, + apache::thrift::util::enumNameSafe(taskDesc.get_status()), + convertJobTimestampToDateTime(taskDesc.get_start_time()), + convertJobTimestampToDateTime(taskDesc.get_stop_time()), + })); + } + return v; + } +} } // namespace graph } // namespace nebula diff --git a/src/graph/executor/admin/SubmitJobExecutor.h b/src/graph/executor/admin/SubmitJobExecutor.h index 669e783977f..92bf2a98857 100644 --- a/src/graph/executor/admin/SubmitJobExecutor.h +++ b/src/graph/executor/admin/SubmitJobExecutor.h @@ -22,6 +22,8 @@ class SubmitJobExecutor final : public Executor { FRIEND_TEST(JobTest, JobFinishTime); StatusOr buildResult(meta::cpp2::AdminJobOp jobOp, meta::cpp2::AdminJobResult &&resp); Value convertJobTimestampToDateTime(int64_t timestamp); + nebula::DataSet buildShowResultData(const nebula::meta::cpp2::JobDesc &jd, + const std::vector &td); }; } // namespace graph diff --git a/src/graph/planner/plan/Admin.cpp b/src/graph/planner/plan/Admin.cpp index 5400ff50100..7361c531163 100644 --- a/src/graph/planner/plan/Admin.cpp +++ b/src/graph/planner/plan/Admin.cpp @@ -156,18 +156,6 @@ std::unique_ptr SubmitJob::explain() const { return desc; } -std::unique_ptr Balance::explain() const { - auto desc = SingleDependencyNode::explain(); - 
addDescription("deleteHosts", folly::toJson(util::toJson(deleteHosts_)), desc.get()); - return desc; -} - -std::unique_ptr ShowBalance::explain() const { - auto desc = SingleDependencyNode::explain(); - addDescription("balanceId", util::toJson(id_), desc.get()); - return desc; -} - std::unique_ptr ShowQueries::explain() const { auto desc = SingleDependencyNode::explain(); addDescription("isAll", util::toJson(isAll()), desc.get()); diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h index 8caac08f917..c0dee822263 100644 --- a/src/graph/planner/plan/Admin.h +++ b/src/graph/planner/plan/Admin.h @@ -733,73 +733,6 @@ class SubmitJob final : public SingleDependencyNode { const std::vector params_; }; -class BalanceLeaders final : public SingleDependencyNode { - public: - static BalanceLeaders* make(QueryContext* qctx, PlanNode* dep) { - return qctx->objPool()->add(new BalanceLeaders(qctx, dep)); - } - - private: - explicit BalanceLeaders(QueryContext* qctx, PlanNode* dep) - : SingleDependencyNode(qctx, Kind::kBalanceLeaders, dep) {} -}; - -class Balance final : public SingleDependencyNode { - public: - static Balance* make(QueryContext* qctx, PlanNode* dep, std::vector deleteHosts) { - return qctx->objPool()->add(new Balance(qctx, dep, std::move(deleteHosts))); - } - - std::unique_ptr explain() const override; - - const std::vector& deleteHosts() const { return deleteHosts_; } - - private: - Balance(QueryContext* qctx, PlanNode* dep, std::vector deleteHosts) - : SingleDependencyNode(qctx, Kind::kBalance, dep), deleteHosts_(std::move(deleteHosts)) {} - - std::vector deleteHosts_; -}; - -class StopBalance final : public SingleDependencyNode { - public: - static StopBalance* make(QueryContext* qctx, PlanNode* dep) { - return qctx->objPool()->add(new StopBalance(qctx, dep)); - } - - private: - explicit StopBalance(QueryContext* qctx, PlanNode* dep) - : SingleDependencyNode(qctx, Kind::kStopBalance, dep) {} -}; - -class ResetBalance final : public 
SingleDependencyNode { - public: - static ResetBalance* make(QueryContext* qctx, PlanNode* dep) { - return qctx->objPool()->add(new ResetBalance(qctx, dep)); - } - - private: - explicit ResetBalance(QueryContext* qctx, PlanNode* dep) - : SingleDependencyNode(qctx, Kind::kResetBalance, dep) {} -}; - -class ShowBalance final : public SingleDependencyNode { - public: - static ShowBalance* make(QueryContext* qctx, PlanNode* dep, int64_t jobId) { - return qctx->objPool()->add(new ShowBalance(qctx, dep, jobId)); - } - - std::unique_ptr explain() const override; - - int64_t jobId() const { return jobId_; } - - private: - ShowBalance(QueryContext* qctx, PlanNode* dep, int64_t jobId) - : SingleDependencyNode(qctx, Kind::kShowBalance, dep), jobId_(jobId) {} - - int64_t jobId_; -}; - class ShowCharset final : public SingleDependencyNode { public: static ShowCharset* make(QueryContext* qctx, PlanNode* input) { diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index 77dc04b5df9..6b0cfcfc8ad 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -186,16 +186,6 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "DropSnapshot"; case Kind::kShowSnapshots: return "ShowSnapshots"; - case Kind::kBalanceLeaders: - return "BalanceLeaders"; - case Kind::kBalance: - return "Balance"; - case Kind::kStopBalance: - return "StopBalance"; - case Kind::kResetBalance: - return "ResetBalance"; - case Kind::kShowBalance: - return "ShowBalance"; case Kind::kSubmitJob: return "SubmitJob"; case Kind::kLeftJoin: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index 7ad7d670f30..d81bf638242 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -105,11 +105,6 @@ class PlanNode { kShowEdgeIndexStatus, kInsertVertices, kInsertEdges, - kBalanceLeaders, - kBalance, - kStopBalance, - kResetBalance, - kShowBalance, kSubmitJob, kShowHosts, diff --git 
a/src/graph/service/PermissionCheck.cpp b/src/graph/service/PermissionCheck.cpp index f23bac7386b..626d7ec4a04 100644 --- a/src/graph/service/PermissionCheck.cpp +++ b/src/graph/service/PermissionCheck.cpp @@ -67,7 +67,6 @@ Status PermissionCheck::permissionCheck(ClientSession *session, case Sentence::Kind::kListZones: case Sentence::Kind::kAddHostIntoZone: case Sentence::Kind::kDropHostFromZone: - case Sentence::Kind::kBalance: case Sentence::Kind::kShowConfigs: case Sentence::Kind::kSetConfig: case Sentence::Kind::kGetConfig: diff --git a/src/graph/validator/AdminJobValidator.h b/src/graph/validator/AdminJobValidator.h index 73ce1452f5d..e576db54c20 100644 --- a/src/graph/validator/AdminJobValidator.h +++ b/src/graph/validator/AdminJobValidator.h @@ -36,9 +36,10 @@ class AdminJobValidator final : public Validator { case meta::cpp2::AdminCmd::STATS: case meta::cpp2::AdminCmd::COMPACT: case meta::cpp2::AdminCmd::FLUSH: + case meta::cpp2::AdminCmd::DATA_BALANCE: + case meta::cpp2::AdminCmd::LEADER_BALANCE: return true; // TODO: Also space related, but not available in CreateJobExcutor now. - case meta::cpp2::AdminCmd::DATA_BALANCE: case meta::cpp2::AdminCmd::DOWNLOAD: case meta::cpp2::AdminCmd::INGEST: case meta::cpp2::AdminCmd::UNKNOWN: diff --git a/src/graph/validator/BalanceValidator.cpp b/src/graph/validator/BalanceValidator.cpp deleted file mode 100644 index 306d8b6a27d..00000000000 --- a/src/graph/validator/BalanceValidator.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "graph/validator/BalanceValidator.h" - -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -Status BalanceValidator::toPlan() { - PlanNode *current = nullptr; - BalanceSentence *sentence = static_cast(sentence_); - switch (sentence->subType()) { - case BalanceSentence::SubType::kLeader: - current = BalanceLeaders::make(qctx_, nullptr); - break; - case BalanceSentence::SubType::kData: { - auto hosts = - sentence->hostDel() == nullptr ? std::vector() : sentence->hostDel()->hosts(); - if (!hosts.empty()) { - auto it = std::unique(hosts.begin(), hosts.end()); - if (it != hosts.end()) { - return Status::SemanticError("Host have duplicated"); - } - } - current = Balance::make(qctx_, nullptr, std::move(hosts)); - break; - } - case BalanceSentence::SubType::kDataStop: - current = StopBalance::make(qctx_, nullptr); - break; - case BalanceSentence::SubType::kDataReset: - current = ResetBalance::make(qctx_, nullptr); - break; - case BalanceSentence::SubType::kShowBalancePlan: - current = ShowBalance::make(qctx_, nullptr, sentence->balanceId()); - break; - case BalanceSentence::SubType::kUnknown: - // fallthrough - default: - DLOG(FATAL) << "Unknown balance kind " << sentence->kind(); - return Status::NotSupported("Unknown balance kind %d", static_cast(sentence->kind())); - } - root_ = current; - tail_ = root_; - return Status::OK(); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/validator/BalanceValidator.h b/src/graph/validator/BalanceValidator.h deleted file mode 100644 index 7766bc30b3f..00000000000 --- a/src/graph/validator/BalanceValidator.h +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef GRAPH_VALIDATOR_BALANCEVALIDATOR_H_ -#define GRAPH_VALIDATOR_BALANCEVALIDATOR_H_ - -#include "graph/validator/Validator.h" -#include "parser/AdminSentences.h" - -namespace nebula { -namespace graph { - -class BalanceValidator final : public Validator { - public: - BalanceValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override { return Status::OK(); } - - Status toPlan() override; -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_VALIDATOR_BALANCEVALIDATOR_H_ diff --git a/src/graph/validator/CMakeLists.txt b/src/graph/validator/CMakeLists.txt index e04f08c1e65..722f3725a36 100644 --- a/src/graph/validator/CMakeLists.txt +++ b/src/graph/validator/CMakeLists.txt @@ -13,7 +13,6 @@ nebula_add_library( UseValidator.cpp GetSubgraphValidator.cpp AdminValidator.cpp - BalanceValidator.cpp AdminJobValidator.cpp MaintainValidator.cpp MutateValidator.cpp diff --git a/src/graph/validator/Validator.cpp b/src/graph/validator/Validator.cpp index 4d3c0830eeb..6c2aba0cfb2 100644 --- a/src/graph/validator/Validator.cpp +++ b/src/graph/validator/Validator.cpp @@ -15,7 +15,6 @@ #include "graph/validator/AdminJobValidator.h" #include "graph/validator/AdminValidator.h" #include "graph/validator/AssignmentValidator.h" -#include "graph/validator/BalanceValidator.h" #include "graph/validator/DownloadValidator.h" #include "graph/validator/ExplainValidator.h" #include "graph/validator/FetchEdgesValidator.h" @@ -132,8 +131,6 @@ std::unique_ptr Validator::makeValidator(Sentence* sentence, QueryCon return std::make_unique(sentence, context); case Sentence::Kind::kShowRoles: return std::make_unique(sentence, context); - case Sentence::Kind::kBalance: - return std::make_unique(sentence, context); case Sentence::Kind::kAdminJob: case Sentence::Kind::kAdminShowJobs: return std::make_unique(sentence, context); diff --git a/src/interface/meta.thrift 
b/src/interface/meta.thrift index 9418a9d7a6b..f75e5751bf1 100644 --- a/src/interface/meta.thrift +++ b/src/interface/meta.thrift @@ -228,6 +228,7 @@ enum AdminCmd { DATA_BALANCE = 6, DOWNLOAD = 7, INGEST = 8, + LEADER_BALANCE = 9, UNKNOWN = 99, } (cpp.enum_strict) @@ -703,15 +704,6 @@ struct ChangePasswordReq { 3: binary old_encoded_pwd, } -struct BalanceReq { - 1: optional common.GraphSpaceID space_id, - // Specify the balance id to check the status of the related balance plan - 2: optional i64 id, - 3: optional list host_del, - 4: optional bool stop, - 5: optional bool reset, -} - enum TaskResult { SUCCEEDED = 0x00, FAILED = 0x01, @@ -722,18 +714,10 @@ enum TaskResult { struct BalanceTask { 1: binary id, - 2: TaskResult result, -} - -struct BalanceResp { - 1: common.ErrorCode code, - 2: i64 id, - // Valid if code equals E_LEADER_CHANGED. - 3: common.HostAddr leader, - 4: list tasks, -} - -struct LeaderBalanceReq { + 2: binary command, + 3: TaskResult result, + 4: i64 start_time, + 5: i64 stop_time, } enum ConfigModule { @@ -1222,8 +1206,6 @@ service MetaService { ExecResp changePassword(1: ChangePasswordReq req); HBResp heartBeat(1: HBReq req); - BalanceResp balance(1: BalanceReq req); - ExecResp leaderBalance(1: LeaderBalanceReq req); ExecResp regConfig(1: RegConfigReq req); GetConfigResp getConfig(1: GetConfigReq req); diff --git a/src/kvstore/plugins/hbase/test/CMakeLists.txt b/src/kvstore/plugins/hbase/test/CMakeLists.txt index ea6d2e26e8a..8ad5c3f7f12 100644 --- a/src/kvstore/plugins/hbase/test/CMakeLists.txt +++ b/src/kvstore/plugins/hbase/test/CMakeLists.txt @@ -17,9 +17,11 @@ nebula_add_test( $ $ $ + $ $ $ $ + $ LIBRARIES ${ROCKSDB_LIBRARIES} ${THRIFT_LIBRARIES} @@ -46,10 +48,12 @@ nebula_add_test( $ $ $ + $ $ $ $ $ + $ LIBRARIES ${ROCKSDB_LIBRARIES} ${THRIFT_LIBRARIES} diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index e6427ad59ba..e3165b35fdf 100644 --- a/src/meta/CMakeLists.txt +++ b/src/meta/CMakeLists.txt @@ -44,15 +44,12 @@ 
nebula_add_library( processors/kv/ScanProcessor.cpp processors/admin/HBProcessor.cpp processors/user/AuthenticationProcessor.cpp - processors/admin/BalanceProcessor.cpp processors/admin/CreateSnapshotProcessor.cpp processors/admin/DropSnapshotProcessor.cpp processors/admin/ListSnapshotsProcessor.cpp - processors/admin/Balancer.cpp - processors/admin/BalancePlan.cpp - processors/admin/BalanceTask.cpp + processors/job/BalancePlan.cpp + processors/job/BalanceTask.cpp processors/admin/AdminClient.cpp - processors/admin/LeaderBalanceProcessor.cpp processors/admin/SnapShot.cpp processors/admin/CreateBackupProcessor.cpp processors/admin/RestoreProcessor.cpp diff --git a/src/meta/MetaServiceHandler.cpp b/src/meta/MetaServiceHandler.cpp index a706e4e2ae5..4e58544af0b 100644 --- a/src/meta/MetaServiceHandler.cpp +++ b/src/meta/MetaServiceHandler.cpp @@ -6,13 +6,11 @@ #include "meta/MetaServiceHandler.h" #include "common/utils/MetaKeyUtils.h" -#include "meta/processors/admin/BalanceProcessor.h" #include "meta/processors/admin/CreateBackupProcessor.h" #include "meta/processors/admin/CreateSnapshotProcessor.h" #include "meta/processors/admin/DropSnapshotProcessor.h" #include "meta/processors/admin/GetMetaDirInfoProcessor.h" #include "meta/processors/admin/HBProcessor.h" -#include "meta/processors/admin/LeaderBalanceProcessor.h" #include "meta/processors/admin/ListClusterInfoProcessor.h" #include "meta/processors/admin/ListSnapshotsProcessor.h" #include "meta/processors/admin/RestoreProcessor.h" @@ -378,17 +376,6 @@ folly::Future MetaServiceHandler::future_getUserRoles( RETURN_FUTURE(processor); } -folly::Future MetaServiceHandler::future_balance(const cpp2::BalanceReq& req) { - auto* processor = BalanceProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_leaderBalance( - const cpp2::LeaderBalanceReq& req) { - auto* processor = LeaderBalanceProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - folly::Future 
MetaServiceHandler::future_regConfig(const cpp2::RegConfigReq& req) { auto* processor = RegConfigProcessor::instance(kvstore_); RETURN_FUTURE(processor); diff --git a/src/meta/MetaServiceHandler.h b/src/meta/MetaServiceHandler.h index a66a01ecdee..2ea463e7c89 100644 --- a/src/meta/MetaServiceHandler.h +++ b/src/meta/MetaServiceHandler.h @@ -154,10 +154,6 @@ class MetaServiceHandler final : public cpp2::MetaServiceSvIf { * */ folly::Future future_heartBeat(const cpp2::HBReq& req) override; - folly::Future future_balance(const cpp2::BalanceReq& req) override; - - folly::Future future_leaderBalance(const cpp2::LeaderBalanceReq& req) override; - folly::Future future_regConfig(const cpp2::RegConfigReq& req) override; folly::Future future_getConfig(const cpp2::GetConfigReq& req) override; diff --git a/src/meta/processors/admin/BalanceProcessor.cpp b/src/meta/processors/admin/BalanceProcessor.cpp deleted file mode 100644 index 1e7e9238e5b..00000000000 --- a/src/meta/processors/admin/BalanceProcessor.cpp +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "meta/processors/admin/BalanceProcessor.h" - -#include "meta/processors/admin/Balancer.h" - -namespace nebula { -namespace meta { - -void BalanceProcessor::process(const cpp2::BalanceReq& req) { - if (req.get_space_id() != nullptr) { - LOG(ERROR) << "Unsupport balance for specific space " << *req.get_space_id(); - handleErrorCode(nebula::cpp2::ErrorCode::E_UNSUPPORTED); - onFinished(); - return; - } - - if (req.get_stop() != nullptr) { - if (!(*req.get_stop())) { - handleErrorCode(nebula::cpp2::ErrorCode::E_UNKNOWN); - onFinished(); - return; - } - auto ret = Balancer::instance(kvstore_)->stop(); - if (!nebula::ok(ret)) { - handleErrorCode(nebula::error(ret)); - onFinished(); - return; - } - handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); - resp_.set_id(nebula::value(ret)); - onFinished(); - return; - } - - if (req.get_reset() != nullptr) { - if (!(*req.get_reset())) { - handleErrorCode(nebula::cpp2::ErrorCode::E_UNKNOWN); - onFinished(); - return; - } - auto plan = Balancer::instance(kvstore_)->cleanLastInValidPlan(); - if (!ok(plan)) { - handleErrorCode(error(plan)); - onFinished(); - return; - } - resp_.set_id(value(plan)); - handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); - onFinished(); - return; - } - - if (req.get_id() != nullptr) { - auto ret = Balancer::instance(kvstore_)->show(*req.get_id()); - if (!nebula::ok(ret)) { - auto retCode = nebula::error(ret); - LOG(ERROR) << "Show balance ID failed, error " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - const auto& plan = nebula::value(ret); - std::vector thriftTasks; - for (auto& task : plan.tasks()) { - cpp2::BalanceTask t; - t.set_id(task.taskIdStr()); - switch (task.result()) { - case BalanceTaskResult::SUCCEEDED: - t.set_result(cpp2::TaskResult::SUCCEEDED); - break; - case BalanceTaskResult::FAILED: - t.set_result(cpp2::TaskResult::FAILED); - break; - case BalanceTaskResult::IN_PROGRESS: - 
t.set_result(cpp2::TaskResult::IN_PROGRESS); - break; - case BalanceTaskResult::INVALID: - t.set_result(cpp2::TaskResult::INVALID); - break; - } - thriftTasks.emplace_back(std::move(t)); - } - resp_.set_tasks(std::move(thriftTasks)); - onFinished(); - return; - } - - auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); - if (!nebula::ok(activeHostsRet)) { - auto retCode = nebula::error(activeHostsRet); - LOG(ERROR) << "Get active hosts failed, error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - auto hosts = std::move(nebula::value(activeHostsRet)); - - if (hosts.empty()) { - LOG(ERROR) << "There is no active hosts"; - handleErrorCode(nebula::cpp2::ErrorCode::E_NO_HOSTS); - onFinished(); - return; - } - - std::vector lostHosts; - if (req.host_del_ref().has_value()) { - lostHosts = *req.host_del_ref(); - } - - auto ret = Balancer::instance(kvstore_)->balance(std::move(lostHosts)); - if (!ok(ret)) { - auto retCode = error(ret); - LOG(ERROR) << "Balance Failed: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - resp_.set_id(value(ret)); - handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); - onFinished(); -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/admin/BalanceProcessor.h b/src/meta/processors/admin/BalanceProcessor.h deleted file mode 100644 index 401dd51242e..00000000000 --- a/src/meta/processors/admin/BalanceProcessor.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_BALANCEPROCESSOR_H_ -#define META_BALANCEPROCESSOR_H_ - -#include - -#include "meta/ActiveHostsMan.h" -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class BalanceProcessor : public BaseProcessor { - public: - static BalanceProcessor* instance(kvstore::KVStore* kvstore) { - return new BalanceProcessor(kvstore); - } - - void process(const cpp2::BalanceReq& req); - - private: - explicit BalanceProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -} // namespace meta -} // namespace nebula - -#endif // META_BALANCEPROCESSOR_H_ diff --git a/src/meta/processors/admin/Balancer.cpp b/src/meta/processors/admin/Balancer.cpp deleted file mode 100644 index 2780ab39911..00000000000 --- a/src/meta/processors/admin/Balancer.cpp +++ /dev/null @@ -1,1232 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "meta/processors/admin/Balancer.h" - -#include - -#include -#include - -#include "common/network/NetworkUtils.h" -#include "common/utils/MetaKeyUtils.h" -#include "kvstore/NebulaStore.h" -#include "meta/ActiveHostsMan.h" -#include "meta/common/MetaCommon.h" -#include "meta/processors/Common.h" - -DEFINE_double(leader_balance_deviation, - 0.05, - "after leader balance, leader count should in range " - "[avg * (1 - deviation), avg * (1 + deviation)]"); - -namespace nebula { -namespace meta { - -ErrorOr Balancer::balance(std::vector&& lostHosts) { - std::lock_guard lg(lock_); - if (!running_) { - auto retCode = recovery(); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Recovery balancer failed!"; - finish(); - return retCode; - } - if (plan_ == nullptr) { - LOG(INFO) << "There is no corrupted plan need to recovery, so create a new one"; - retCode = buildBalancePlan(std::move(lostHosts)); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Create balance plan failed"; - 
finish(); - return retCode; - } - } - LOG(INFO) << "Start to invoke balance plan " << plan_->id(); - executor_->add(std::bind(&BalancePlan::invoke, plan_.get())); - running_ = true; - return plan_->id(); - } - CHECK(!!plan_); - LOG(INFO) << "Balance plan " << plan_->id() << " is still running"; - return plan_->id(); -} - -ErrorOr Balancer::show(BalanceID id) const { - std::lock_guard lg(lock_); - if (plan_ != nullptr && plan_->id() == id) { - return *plan_; - } - - if (kv_) { - BalancePlan plan(id, kv_, client_); - auto retCode = plan.recovery(false); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get balance plan failed, id " << id; - return retCode; - } - return plan; - } - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; -} - -ErrorOr Balancer::stop() { - std::lock_guard lg(lock_); - if (!running_) { - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } - CHECK(!!plan_); - plan_->stop(); - LOG(INFO) << "Stop balance plan " << plan_->id(); - return plan_->id(); -} - -ErrorOr Balancer::cleanLastInValidPlan() { - std::lock_guard lg(lock_); - auto* store = static_cast(kv_); - if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { - return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; - } - if (running_) { - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; - } - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - // There should be at most one invalid plan, and it must be the latest one - if (iter->valid()) { - auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); - if (status == BalanceStatus::FAILED) { - auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); - folly::Baton baton; - auto result = nebula::cpp2::ErrorCode::SUCCEEDED; - // Only remove the 
plan will be enough - kv_->asyncMultiRemove(kDefaultSpaceId, - kDefaultPartId, - {iter->key().str()}, - [&baton, &result](nebula::cpp2::ErrorCode code) { - result = code; - baton.post(); - }); - baton.wait(); - if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { - return result; - } - return balanceId; - } - } - return nebula::cpp2::ErrorCode::E_NO_INVALID_BALANCE_PLAN; -} - -nebula::cpp2::ErrorCode Balancer::recovery() { - CHECK(!plan_) << "plan should be nullptr now"; - if (kv_) { - auto* store = static_cast(kv_); - if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { - // We need to check whether is leader or not, otherwise we would failed to - // persist state of BalancePlan and BalanceTask, so we just reject request - // if not leader. - return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; - } - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - std::vector corruptedPlans; - // The balance plan is stored with balance id desc order, there should be at - // most one failed or in_progress plan, and it must be the latest one - if (iter->valid()) { - auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); - if (status == BalanceStatus::IN_PROGRESS || status == BalanceStatus::FAILED) { - auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); - corruptedPlans.emplace_back(balanceId); - } - } - if (corruptedPlans.empty()) { - LOG(INFO) << "No corrupted plan need to recovery!"; - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - CHECK_EQ(1, corruptedPlans.size()); - plan_ = std::make_unique(corruptedPlans[0], kv_, client_); - plan_->onFinished_ = [this]() { - auto self = plan_; - { - std::lock_guard lg(lock_); - if (LastUpdateTimeMan::update(kv_, 
time::WallClock::fastNowInMilliSec()) != - nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; - } - finish(); - } - }; - auto recRet = plan_->recovery(); - if (recRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't recovery plan " << corruptedPlans[0]; - return recRet; - } - } - // save the balance plan again because FAILED tasks would be marked as - // IN_PROGRESS again - return plan_->saveInStore(); -} - -nebula::cpp2::ErrorCode Balancer::getAllSpaces( - std::vector>& spaces) { - // Get all spaces - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::spacePrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get all spaces failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - while (iter->valid()) { - auto spaceId = MetaKeyUtils::spaceId(iter->key()); - auto properties = MetaKeyUtils::parseSpace(iter->val()); - bool zoned = properties.group_name_ref().has_value(); - spaces.emplace_back(spaceId, *properties.replica_factor_ref(), zoned); - iter->next(); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -nebula::cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { - if (plan_ != nullptr) { - LOG(ERROR) << "Balance plan should be nullptr now"; - return nebula::cpp2::ErrorCode::E_BALANCED; - } - - std::vector> spaces; - auto spacesRet = getAllSpaces(spaces); - if (spacesRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't get all spaces"; - return spacesRet; - } - - plan_ = std::make_unique(time::WallClock::fastNowInSec(), kv_, client_); - for (const auto& spaceInfo : spaces) { - auto spaceId = std::get<0>(spaceInfo); - auto spaceReplica = std::get<1>(spaceInfo); - auto dependentOnGroup = std::get<2>(spaceInfo); - LOG(INFO) << "Balance Space " << 
spaceId; - auto taskRet = genTasks(spaceId, spaceReplica, dependentOnGroup, std::move(lostHosts)); - if (!ok(taskRet)) { - LOG(ERROR) << "Generate tasks on space " << std::get<0>(spaceInfo) << " failed"; - return error(taskRet); - } - - auto tasks = std::move(value(taskRet)); - for (auto& task : tasks) { - plan_->addTask(std::move(task)); - } - } - - plan_->onFinished_ = [this]() { - auto self = plan_; - { - std::lock_guard lg(lock_); - if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) != - nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; - } - finish(); - } - }; - if (plan_->tasks_.empty()) { - return nebula::cpp2::ErrorCode::E_BALANCED; - } - return plan_->saveInStore(); -} - -ErrorOr> Balancer::genTasks( - GraphSpaceID spaceId, - int32_t spaceReplica, - bool dependentOnGroup, - std::vector&& lostHosts) { - HostParts hostParts; - int32_t totalParts = 0; - // hostParts is current part allocation map - auto result = getHostParts(spaceId, dependentOnGroup, hostParts, totalParts); - if (!nebula::ok(result)) { - return nebula::error(result); - } - - auto retVal = nebula::value(result); - if (!retVal || totalParts == 0 || hostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } - - auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); - if (!nebula::ok(fetchHostPartsRet)) { - LOG(ERROR) << "Fetch hosts and parts failed"; - return nebula::error(fetchHostPartsRet); - } - - auto hostPartsRet = nebula::value(fetchHostPartsRet); - auto confirmedHostParts = hostPartsRet.first; - auto activeHosts = hostPartsRet.second; - LOG(INFO) << "Now, try to balance the confirmedHostParts"; - - // We have two parts need to balance, the first one is parts on lost hosts and - // deleted hosts The seconds one is parts on unbalanced host in - // confirmedHostParts. - std::vector tasks; - // 1. 
Iterate through all hosts that would not be included in - // confirmedHostParts, - // move all parts in them to host with minimum part in confirmedHostParts - for (auto& lostHost : lostHosts) { - auto& lostParts = hostParts[lostHost]; - for (auto& partId : lostParts) { - LOG(INFO) << "Try balance part " << partId << " for lost host " << lostHost; - // check whether any peers which is alive - auto alive = checkReplica(hostParts, activeHosts, spaceReplica, partId); - if (!alive.ok()) { - LOG(ERROR) << "Check Replica failed: " << alive << " Part: " << partId; - return nebula::cpp2::ErrorCode::E_NO_VALID_HOST; - } - - auto retCode = - transferLostHost(tasks, confirmedHostParts, lostHost, spaceId, partId, dependentOnGroup); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; - return retCode; - } - } - } - - // 2. Make all hosts in confirmedHostParts balanced - if (balanceParts(plan_->id_, spaceId, confirmedHostParts, totalParts, tasks, dependentOnGroup)) { - return tasks; - } else { - return nebula::cpp2::ErrorCode::E_BAD_BALANCE_PLAN; - } -} - -nebula::cpp2::ErrorCode Balancer::transferLostHost(std::vector& tasks, - HostParts& confirmedHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup) { - // find a host with minimum parts which doesn't have this part - ErrorOr result; - if (dependentOnGroup) { - result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); - } else { - result = hostWithMinimalParts(confirmedHostParts, partId); - } - - if (!nebula::ok(result)) { - LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; - return nebula::error(result); - } - const auto& targetHost = nebula::value(result); - confirmedHostParts[targetHost].emplace_back(partId); - tasks.emplace_back(plan_->id_, spaceId, partId, source, targetHost, kv_, client_); - zoneParts_[targetHost].second.emplace_back(partId); - auto zoneIt = - 
std::find(zoneParts_[source].second.begin(), zoneParts_[source].second.end(), partId); - if (zoneIt == zoneParts_[source].second.end()) { - LOG(ERROR) << "part not find " << partId << " at " << source; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -ErrorOr>> -Balancer::fetchHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - const HostParts& hostParts, - std::vector& lostHosts) { - ErrorOr> activeHostsRet; - if (dependentOnGroup) { - activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); - } else { - activeHostsRet = ActiveHostsMan::getActiveHosts(kv_); - } - - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } - - std::vector expand; - auto activeHosts = nebula::value(activeHostsRet); - calDiff(hostParts, activeHosts, expand, lostHosts); - // confirmedHostParts is new part allocation map after balance, it would - // include newlyAdded and exclude lostHosts - HostParts confirmedHostParts(hostParts); - for (const auto& h : expand) { - LOG(INFO) << "Found new host " << h; - confirmedHostParts.emplace(h, std::vector()); - } - for (const auto& h : lostHosts) { - LOG(INFO) << "Lost host " << h; - confirmedHostParts.erase(h); - } - return std::make_pair(confirmedHostParts, activeHosts); -} - -bool Balancer::balanceParts(BalanceID balanceId, - GraphSpaceID spaceId, - HostParts& confirmedHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnGroup) { - auto avgLoad = static_cast(totalParts) / confirmedHostParts.size(); - VLOG(3) << "The expect avg load is " << avgLoad; - int32_t minLoad = std::floor(avgLoad); - int32_t maxLoad = std::ceil(avgLoad); - VLOG(3) << "The min load is " << minLoad << " max load is " << maxLoad; - - auto sortedHosts = sortedHostsByParts(confirmedHostParts); - if (sortedHosts.empty()) { - LOG(ERROR) << "Host is empty"; - return false; - } - - auto maxPartsHost = sortedHosts.back(); - auto minPartsHost = sortedHosts.front(); - auto& sourceHost = maxPartsHost.first; - auto& 
targetHost = minPartsHost.first; - if (innerBalance_) { - LOG(INFO) << "maxPartsHost.first " << maxPartsHost.first << " minPartsHost.first " - << minPartsHost.first; - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); - } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; - } - totalPartsZone += it->second.size(); - } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; - } - - while (maxPartsHost.second > maxLoad || minPartsHost.second < minLoad) { - auto& partsFrom = confirmedHostParts[maxPartsHost.first]; - auto& partsTo = confirmedHostParts[minPartsHost.first]; - std::sort(partsFrom.begin(), partsFrom.end()); - std::sort(partsTo.begin(), partsTo.end()); - - LOG(INFO) << maxPartsHost.first << ":" << partsFrom.size() << " -> " << minPartsHost.first - << ":" << partsTo.size(); - std::vector diff; - std::set_difference(partsFrom.begin(), - partsFrom.end(), - partsTo.begin(), - partsTo.end(), - std::inserter(diff, diff.begin())); - bool noAction = true; - for (auto& partId : diff) { - LOG(INFO) << "partsFrom size " << partsFrom.size() << " partsTo size " << partsTo.size() - << " minLoad " << minLoad << " maxLoad " << maxLoad; - if (partsFrom.size() == 
partsTo.size() + 1 || - partsFrom.size() == static_cast(minLoad) || - partsTo.size() == static_cast(maxLoad)) { - VLOG(3) << "No need to move any parts from " << maxPartsHost.first << " to " - << minPartsHost.first; - break; - } - - LOG(INFO) << "[space:" << spaceId << ", part:" << partId << "] " << maxPartsHost.first << "->" - << minPartsHost.first; - auto it = std::find(partsFrom.begin(), partsFrom.end(), partId); - if (it == partsFrom.end()) { - LOG(ERROR) << "Part " << partId << " not found in partsFrom"; - return false; - } - - if (std::find(partsTo.begin(), partsTo.end(), partId) != partsTo.end()) { - LOG(ERROR) << "Part " << partId << " already existed in partsTo"; - return false; - } - - if (dependentOnGroup) { - if (!checkZoneLegal(sourceHost, targetHost)) { - LOG(INFO) << "sourceHost " << sourceHost << " targetHost " << targetHost - << " not same zone"; - - auto& parts = relatedParts_[targetHost]; - auto minIt = std::find(parts.begin(), parts.end(), partId); - if (minIt != parts.end()) { - LOG(INFO) << "Part " << partId << " have existed"; - continue; - } - } - - auto& sourceNoneName = zoneParts_[sourceHost].first; - auto sourceHosts = zoneHosts_.find(sourceNoneName); - for (auto& sh : sourceHosts->second) { - auto& parts = relatedParts_[sh]; - auto maxIt = std::find(parts.begin(), parts.end(), partId); - if (maxIt == parts.end()) { - LOG(INFO) << "Part " << partId << " not found on " << sh; - continue; - } - parts.erase(maxIt); - } - - auto& targetNoneName = zoneParts_[targetHost].first; - auto targetHosts = zoneHosts_.find(targetNoneName); - for (auto& th : targetHosts->second) { - relatedParts_[th].emplace_back(partId); - } - } - - partsFrom.erase(it); - partsTo.emplace_back(partId); - tasks.emplace_back( - balanceId, spaceId, partId, maxPartsHost.first, minPartsHost.first, kv_, client_); - noAction = false; - } - - if (noAction) { - LOG(INFO) << "Here is no action"; - break; - } - sortedHosts = sortedHostsByParts(confirmedHostParts); - maxPartsHost = 
sortedHosts.back(); - minPartsHost = sortedHosts.front(); - if (innerBalance_) { - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); - } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; - } - totalPartsZone += it->second.size(); - } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; - } - } - LOG(INFO) << "Balance tasks num: " << tasks.size(); - for (auto& task : tasks) { - LOG(INFO) << task.taskIdStr(); - } - - relatedParts_.clear(); - return true; -} - -ErrorOr Balancer::getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << " " - << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - while (iter->valid()) { - auto key = iter->key(); - PartitionID partId; - memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); - auto partHosts = 
MetaKeyUtils::parsePartVal(iter->val()); - for (auto& ph : partHosts) { - hostParts[ph].emplace_back(partId); - } - totalParts++; - iter->next(); - } - - LOG(INFO) << "Host size: " << hostParts.size(); - auto key = MetaKeyUtils::spaceKey(spaceId); - std::string value; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &value); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId - << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto properties = MetaKeyUtils::parseSpace(value); - if (totalParts != properties.get_partition_num()) { - LOG(ERROR) << "Partition number not equals"; - LOG(ERROR) << totalParts << " : " << properties.get_partition_num(); - return false; - } - - int32_t replica = properties.get_replica_factor(); - LOG(INFO) << "Replica " << replica; - if (dependentOnGroup && properties.group_name_ref().has_value()) { - auto groupName = *properties.group_name_ref(); - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - int32_t zoneSize = MetaKeyUtils::parseZoneNames(std::move(groupValue)).size(); - LOG(INFO) << "Zone Size " << zoneSize; - innerBalance_ = (replica == zoneSize); - - auto activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } - - std::vector expand; - auto activeHosts = nebula::value(activeHostsRet); - std::vector lostHosts; - calDiff(hostParts, activeHosts, expand, lostHosts); - // confirmedHostParts is new part allocation map after balance, it would include newlyAdded - // and exclude lostHosts - HostParts confirmedHostParts(hostParts); - for (const auto& h : 
expand) { - LOG(INFO) << "Found new host " << h; - confirmedHostParts.emplace(h, std::vector()); - } - for (const auto& h : lostHosts) { - LOG(INFO) << "Lost host " << h; - confirmedHostParts.erase(h); - } - - auto zonePartsRet = assembleZoneParts(groupName, confirmedHostParts); - if (zonePartsRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; - return zonePartsRet; - } - } - - totalParts *= replica; - return true; -} - -nebula::cpp2::ErrorCode Balancer::assembleZoneParts(const std::string& groupName, - HostParts& hostParts) { - LOG(INFO) << "Balancer assembleZoneParts"; - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - // zoneHosts use to record this host belong to zone's hosts - std::unordered_map, std::vector> zoneHosts; - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); - for (auto zoneName : zoneNames) { - LOG(INFO) << "Zone Name: " << zoneName; - auto zoneKey = MetaKeyUtils::zoneKey(zoneName); - std::string zoneValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); - for (const auto& host : hosts) { - LOG(INFO) << "Host for zone " << host; - auto pair = std::pair(std::move(host), zoneName); - auto& hs = zoneHosts[std::move(pair)]; - hs.insert(hs.end(), hosts.begin(), hosts.end()); - } - } - - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - auto host = it->first; - LOG(INFO) << "Host: " << 
host; - auto zoneIter = - std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { - return host == pair.first.first; - }); - - if (zoneIter == zoneHosts.end()) { - LOG(INFO) << it->first << " have lost"; - continue; - } - - auto& hosts = zoneIter->second; - auto name = zoneIter->first.second; - zoneHosts_[name] = hosts; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto partIter = hostParts.find(*hostIter); - LOG(INFO) << "Zone " << name << " have the host " << it->first; - if (partIter == hostParts.end()) { - zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); - } else { - zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); - } - } - } - - for (auto it = zoneHosts.begin(); it != zoneHosts.end(); it++) { - auto host = it->first.first; - auto& hosts = it->second; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto h = *hostIter; - auto iter = std::find_if(hostParts.begin(), hostParts.end(), [h](const auto& pair) -> bool { - return h == pair.first; - }); - - if (iter == hostParts.end()) { - continue; - } - - auto& parts = iter->second; - auto& hp = relatedParts_[host]; - hp.insert(hp.end(), parts.begin(), parts.end()); - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -void Balancer::calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& expand, - std::vector& lost) { - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - VLOG(1) << "Original Host " << it->first << ", parts " << it->second.size(); - if (std::find(activeHosts.begin(), activeHosts.end(), it->first) == activeHosts.end() && - std::find(lost.begin(), lost.end(), it->first) == lost.end()) { - lost.emplace_back(it->first); - } - } - for (auto& h : activeHosts) { - VLOG(1) << "Active host " << h; - if (hostParts.find(h) == hostParts.end()) { - expand.emplace_back(h); - } - } -} - -std::vector> Balancer::sortedHostsByParts(const HostParts& 
hostParts) { - std::vector> hosts; - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - LOG(INFO) << "Host " << it->first << " parts " << it->second.size(); - hosts.emplace_back(it->first, it->second.size()); - } - std::sort(hosts.begin(), hosts.end(), [](const auto& l, const auto& r) { - if (l.second != r.second) { - return l.second < r.second; - } else { - return l.first.host < r.first.host; - } - }); - return hosts; -} - -Status Balancer::checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId) { - // check host hold the part and alive - auto checkPart = [&](const auto& entry) { - auto& host = entry.first; - auto& parts = entry.second; - return std::find(parts.begin(), parts.end(), partId) != parts.end() && - std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); - }; - auto aliveReplica = std::count_if(hostParts.begin(), hostParts.end(), checkPart); - if (aliveReplica >= replica / 2 + 1) { - return Status::OK(); - } - return Status::Error("Not enough alive host hold the part %d", partId); -} - -ErrorOr Balancer::hostWithMinimalParts( - const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; - } - - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; - } - } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; -} - -ErrorOr Balancer::hostWithMinimalPartsForZone( - const HostAddr& source, const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; - } - - LOG(INFO) << 
"source " << source << " h.first " << h.first; - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; - } - } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; -} - -nebula::cpp2::ErrorCode Balancer::leaderBalance() { - if (running_) { - LOG(INFO) << "Balance process still running"; - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; - } - - folly::Promise promise; - auto future = promise.getFuture(); - // Space ID, Replica Factor and Dependent On Group - std::vector> spaces; - auto ret = getAllSpaces(spaces); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't get spaces"; - // TODO unify error code - if (ret != nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { - ret = nebula::cpp2::ErrorCode::E_STORE_FAILURE; - } - return ret; - } - - bool expected = false; - if (inLeaderBalance_.compare_exchange_strong(expected, true)) { - hostLeaderMap_.reset(new HostLeaderMap); - auto status = client_->getLeaderDist(hostLeaderMap_.get()).get(); - if (!status.ok() || hostLeaderMap_->empty()) { - LOG(ERROR) << "Get leader distribution failed"; - inLeaderBalance_ = false; - return nebula::cpp2::ErrorCode::E_RPC_FAILURE; - } - - std::vector> futures; - for (const auto& spaceInfo : spaces) { - auto spaceId = std::get<0>(spaceInfo); - auto replicaFactor = std::get<1>(spaceInfo); - auto dependentOnGroup = std::get<2>(spaceInfo); - LeaderBalancePlan plan; - auto balanceResult = buildLeaderBalancePlan( - hostLeaderMap_.get(), spaceId, replicaFactor, dependentOnGroup, plan); - if (!nebula::ok(balanceResult) || !nebula::value(balanceResult)) { - LOG(ERROR) << "Building leader balance plan failed " - << "Space: " << spaceId; - continue; - } - simplifyLeaderBalnacePlan(spaceId, plan); - for (const auto& task : plan) { - futures.emplace_back(client_->transLeader(std::get<0>(task), - std::get<1>(task), - std::move(std::get<2>(task)), - std::move(std::get<3>(task)))); - } - } - - int32_t failed = 0; - folly::collectAll(futures) - 
.via(executor_.get()) - .thenTry([&](const auto& result) { - auto tries = result.value(); - for (const auto& t : tries) { - if (!t.value().ok()) { - ++failed; - } - } - }) - .wait(); - - inLeaderBalance_ = false; - if (failed != 0) { - LOG(ERROR) << failed << " partiton failed to transfer leader"; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; -} - -ErrorOr Balancer::buildLeaderBalancePlan( - HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation) { - PartAllocation peersMap; - HostParts leaderHostParts; - size_t leaderParts = 0; - // store peers of all paritions in peerMap - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << static_cast(retCode); - return retCode; - } - - while (iter->valid()) { - auto key = iter->key(); - PartitionID partId; - memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); - auto peers = MetaKeyUtils::parsePartVal(iter->val()); - peersMap[partId] = std::move(peers); - ++leaderParts; - iter->next(); - } - - int32_t totalParts = 0; - HostParts allHostParts; - auto result = getHostParts(spaceId, dependentOnGroup, allHostParts, totalParts); - if (!nebula::ok(result)) { - return nebula::error(result); - } else { - auto retVal = nebula::value(result); - if (!retVal || totalParts == 0 || allHostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return false; - } - } - - std::unordered_set activeHosts; - for (const auto& host : *hostLeaderMap) { - // only balance leader between hosts which have valid partition - if (!allHostParts[host.first].empty()) { - 
activeHosts.emplace(host.first); - leaderHostParts[host.first] = (*hostLeaderMap)[host.first][spaceId]; - } - } - - if (activeHosts.empty()) { - LOG(ERROR) << "No active hosts"; - return false; - } - - if (dependentOnGroup) { - for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { - auto min = it->second.size() / replicaFactor; - VLOG(3) << "Host: " << it->first << " Bounds: " << min << " : " << min + 1; - hostBounds_[it->first] = std::make_pair(min, min + 1); - } - } else { - size_t activeSize = activeHosts.size(); - size_t globalAvg = leaderParts / activeSize; - size_t globalMin = globalAvg; - size_t globalMax = globalAvg; - if (leaderParts % activeSize != 0) { - globalMax += 1; - } - - if (useDeviation) { - globalMin = std::ceil(static_cast(leaderParts) / activeSize * - (1 - FLAGS_leader_balance_deviation)); - globalMax = std::floor(static_cast(leaderParts) / activeSize * - (1 + FLAGS_leader_balance_deviation)); - } - VLOG(3) << "Build leader balance plan, expected min load: " << globalMin - << ", max load: " << globalMax << " avg: " << globalAvg; - - for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { - hostBounds_[it->first] = std::make_pair(globalMin, globalMax); - } - } - - while (true) { - int32_t taskCount = 0; - bool hasUnbalancedHost = false; - for (const auto& hostEntry : leaderHostParts) { - auto host = hostEntry.first; - auto& hostMinLoad = hostBounds_[host].first; - auto& hostMaxLoad = hostBounds_[host].second; - int32_t partSize = hostEntry.second.size(); - if (hostMinLoad <= partSize && partSize <= hostMaxLoad) { - VLOG(3) << partSize << " is between min load " << hostMinLoad << " and max load " - << hostMaxLoad; - continue; - } - - hasUnbalancedHost = true; - if (partSize < hostMinLoad) { - // need to acquire leader from other hosts - LOG(INFO) << "Acquire leaders to host: " << host << " loading: " << partSize - << " min loading " << hostMinLoad; - taskCount += acquireLeaders( - allHostParts, leaderHostParts, 
peersMap, activeHosts, host, plan, spaceId); - } else { - // need to transfer leader to other hosts - LOG(INFO) << "Giveup leaders from host: " << host << " loading: " << partSize - << " max loading " << hostMaxLoad; - taskCount += giveupLeaders(leaderHostParts, peersMap, activeHosts, host, plan, spaceId); - } - } - - // If every host is balanced or no more task during this loop, then the plan - // is done - if (!hasUnbalancedHost || taskCount == 0) { - LOG(INFO) << "Not need balance"; - break; - } - } - return true; -} - -int32_t Balancer::acquireLeaders(HostParts& allHostParts, - HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& target, - LeaderBalancePlan& plan, - GraphSpaceID spaceId) { - // host will loop for the partition which is not leader, and try to acuire the - // leader - int32_t taskCount = 0; - std::vector diff; - std::set_difference(allHostParts[target].begin(), - allHostParts[target].end(), - leaderHostParts[target].begin(), - leaderHostParts[target].end(), - std::back_inserter(diff)); - auto& targetLeaders = leaderHostParts[target]; - size_t minLoad = hostBounds_[target].first; - for (const auto& partId : diff) { - VLOG(3) << "Try acquire leader for part " << partId; - // find the leader of partId - auto sources = peersMap[partId]; - for (const auto& source : sources) { - if (source == target || !activeHosts.count(source)) { - continue; - } - - // if peer is the leader of partId and can transfer, then transfer it to - // host - auto& sourceLeaders = leaderHostParts[source]; - VLOG(3) << "Check peer: " << source << " min load: " << minLoad - << " peerLeaders size: " << sourceLeaders.size(); - auto it = std::find(sourceLeaders.begin(), sourceLeaders.end(), partId); - if (it != sourceLeaders.end() && minLoad < sourceLeaders.size()) { - sourceLeaders.erase(it); - targetLeaders.emplace_back(partId); - plan.emplace_back(spaceId, partId, source, target); - LOG(INFO) << "acquire plan trans leader 
space: " << spaceId << " part: " << partId - << " from " << source.host << ":" << source.port << " to " << target.host << ":" - << target.port; - ++taskCount; - break; - } - } - - // if host has enough leader, just return - if (targetLeaders.size() == minLoad) { - LOG(INFO) << "Host: " << target << "'s leader reach " << minLoad; - break; - } - } - return taskCount; -} - -int32_t Balancer::giveupLeaders(HostParts& leaderParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& source, - LeaderBalancePlan& plan, - GraphSpaceID spaceId) { - int32_t taskCount = 0; - auto& sourceLeaders = leaderParts[source]; - size_t maxLoad = hostBounds_[source].second; - - // host will try to transfer the extra leaders to other peers - for (auto it = sourceLeaders.begin(); it != sourceLeaders.end();) { - // find the leader of partId - auto partId = *it; - const auto& targets = peersMap[partId]; - bool isErase = false; - - // leader should move to the peer with lowest loading - auto target = - std::min_element(targets.begin(), targets.end(), [&](const auto& l, const auto& r) -> bool { - if (source == l || !activeHosts.count(l)) { - return false; - } - return leaderParts[l].size() < leaderParts[r].size(); - }); - - // If peer can accept this partition leader, than host will transfer to the - // peer - if (target != targets.end()) { - auto& targetLeaders = leaderParts[*target]; - int32_t targetLeaderSize = targetLeaders.size(); - if (targetLeaderSize < hostBounds_[*target].second) { - it = sourceLeaders.erase(it); - targetLeaders.emplace_back(partId); - plan.emplace_back(spaceId, partId, source, *target); - LOG(INFO) << "giveup plan trans leader space: " << spaceId << " part: " << partId - << " from " << source.host << ":" << source.port << " to " << target->host << ":" - << target->port; - ++taskCount; - isErase = true; - } - } - - // if host has enough leader, just return - if (sourceLeaders.size() == maxLoad) { - LOG(INFO) << "Host: " << source << "'s 
leader reach " << maxLoad; - break; - } - - if (!isErase) { - ++it; - } - } - return taskCount; -} - -void Balancer::simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan) { - // Within a leader balance plan, a partition may be moved several times, but - // actually we only need to transfer the leadership of a partition from the - // first host to the last host, and ignore the intermediate ones - std::unordered_map buckets; - for (auto& task : plan) { - buckets[std::get<1>(task)].emplace_back(task); - } - plan.clear(); - for (const auto& partEntry : buckets) { - plan.emplace_back(spaceId, - partEntry.first, - std::get<2>(partEntry.second.front()), - std::get<3>(partEntry.second.back())); - } -} - -nebula::cpp2::ErrorCode Balancer::collectZoneParts(const std::string& groupName, - HostParts& hostParts) { - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - // zoneHosts use to record this host belong to zone's hosts - std::unordered_map, std::vector> zoneHosts; - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); - for (auto zoneName : zoneNames) { - auto zoneKey = MetaKeyUtils::zoneKey(zoneName); - std::string zoneValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); - for (const auto& host : hosts) { - auto pair = std::pair(std::move(host), zoneName); - auto& hs = zoneHosts[std::move(pair)]; - hs.insert(hs.end(), hosts.begin(), hosts.end()); - } - } 
- - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - auto host = it->first; - auto zoneIter = - std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { - return host == pair.first.first; - }); - - if (zoneIter == zoneHosts.end()) { - LOG(INFO) << it->first << " have lost"; - continue; - } - - auto& hosts = zoneIter->second; - auto name = zoneIter->first.second; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto partIter = hostParts.find(*hostIter); - if (partIter == hostParts.end()) { - zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); - } else { - zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); - } - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -bool Balancer::checkZoneLegal(const HostAddr& source, const HostAddr& target) { - VLOG(3) << "Check " << source << " : " << target; - auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - if (sourceIter == zoneParts_.end()) { - LOG(INFO) << "Source " << source << " not found"; - return false; - } - - auto targetIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&target](const auto& pair) { - return target == pair.first; - }); - - if (targetIter == zoneParts_.end()) { - LOG(INFO) << "Target " << target << " not found"; - return false; - } - - LOG(INFO) << sourceIter->second.first << " : " << targetIter->second.first; - return sourceIter->second.first == targetIter->second.first; -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/admin/Balancer.h b/src/meta/processors/admin/Balancer.h deleted file mode 100644 index ddcb0df039d..00000000000 --- a/src/meta/processors/admin/Balancer.h +++ /dev/null @@ -1,269 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_ADMIN_BALANCER_H_ -#define META_ADMIN_BALANCER_H_ - -#include -#include - -#include "common/network/NetworkUtils.h" -#include "common/time/WallClock.h" -#include "kvstore/KVStore.h" -#include "meta/processors/admin/AdminClient.h" -#include "meta/processors/admin/BalancePlan.h" -#include "meta/processors/admin/BalanceTask.h" - -namespace nebula { -namespace meta { - -using HostParts = std::unordered_map>; -using PartAllocation = std::unordered_map>; -using LeaderBalancePlan = std::vector>; -using ZoneNameAndParts = std::pair>; - -/** -There are two interfaces public: - * Balance: it will construct a balance plan and invoked it. If last balance -plan is not succeeded, it will - * try to resume it. - * - * Rollback: In many cases, if some plan failed forever, we call this interface -to rollback. - -Some notes: -1. Balance will generate balance plan according to current active hosts and -parts allocation -2. For the plan, we hope after moving the least parts , it will reach a -reasonable state. -3. Only one balance plan could be invoked at the same time. -4. Each balance plan has one id, and we could show the status by "balance id" -command and after FO, we could resume the balance plan by type "balance" again. -5. Each balance plan contains many balance tasks, the task represents the -minimum movement unit. -6. We save the whole balancePlan state in kvstore to do failover. -7. Each balance task contains serval steps. And it should be executed step by -step. -8. One task failed will result in the whole balance plan failed. -9. 
Currently, we hope tasks for the same part could be invoked serially - * */ -class Balancer { - FRIEND_TEST(BalanceTest, BalancePartsTest); - FRIEND_TEST(BalanceTest, NormalTest); - FRIEND_TEST(BalanceTest, SimpleTestWithZone); - FRIEND_TEST(BalanceTest, SpecifyHostTest); - FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); - FRIEND_TEST(BalanceTest, MockReplaceMachineTest); - FRIEND_TEST(BalanceTest, SingleReplicaTest); - FRIEND_TEST(BalanceTest, TryToRecoveryTest); - FRIEND_TEST(BalanceTest, RecoveryTest); - FRIEND_TEST(BalanceTest, StopPlanTest); - FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceTest); - FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); - FRIEND_TEST(BalanceTest, ExpansionZoneTest); - FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); - FRIEND_TEST(BalanceTest, ShrinkZoneTest); - FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); - FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); - FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); - FRIEND_TEST(BalanceIntegrationTest, BalanceTest); - - public: - static Balancer* instance(kvstore::KVStore* kv) { - static std::unique_ptr client(new AdminClient(kv)); - static std::unique_ptr balancer(new Balancer(kv, client.get())); - return balancer.get(); - } - - ~Balancer() = default; - - /* - * Return Error if reject the balance request, otherwise return balance id. - * */ - ErrorOr balance(std::vector&& lostHosts = {}); - - /** - * Show balance plan id status. - * */ - ErrorOr show(BalanceID id) const; - - /** - * Stop balance plan by canceling all waiting balance task. 
- * */ - ErrorOr stop(); - - /** - * Clean invalid plan, return the invalid plan key if any - * */ - ErrorOr cleanLastInValidPlan(); - - /** - * TODO(heng): rollback some balance plan. - */ - Status rollback(BalanceID id) { return Status::Error("unplemented, %ld", id); } - - /** - * TODO(heng): Execute balance plan from outside. - * */ - Status execute(BalancePlan plan) { - UNUSED(plan); - return Status::Error("Unsupport it yet!"); - } - - /** - * TODO(heng): Execute specific balance plan by id. - * */ - Status execute(BalanceID id) { - UNUSED(id); - return Status::Error("Unsupport it yet!"); - } - - nebula::cpp2::ErrorCode leaderBalance(); - - void finish() { - CHECK(!lock_.try_lock()); - plan_.reset(); - running_ = false; - } - - bool isRunning() { - std::lock_guard lg(lock_); - return running_; - } - - private: - Balancer(kvstore::KVStore* kv, AdminClient* client) : kv_(kv), client_(client) { - executor_.reset(new folly::CPUThreadPoolExecutor(1)); - } - /* - * When the balancer failover, we should recovery the status. - * */ - nebula::cpp2::ErrorCode recovery(); - - /** - * Build balance plan and save it in kvstore. 
- * */ - nebula::cpp2::ErrorCode buildBalancePlan(std::vector&& lostHosts); - - ErrorOr> genTasks( - GraphSpaceID spaceId, - int32_t spaceReplica, - bool dependentOnGroup, - std::vector&& lostHosts); - - ErrorOr>> fetchHostParts( - GraphSpaceID spaceId, - bool dependentOnGroup, - const HostParts& hostParts, - std::vector& lostHosts); - - ErrorOr getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts); - - nebula::cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); - - void calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& newlyAdded, - std::vector& lost); - - Status checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId); - - ErrorOr hostWithMinimalParts(const HostParts& hostParts, - PartitionID partId); - - ErrorOr hostWithMinimalPartsForZone(const HostAddr& source, - const HostParts& hostParts, - PartitionID partId); - - bool balanceParts(BalanceID balanceId, - GraphSpaceID spaceId, - HostParts& newHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnGroup); - - nebula::cpp2::ErrorCode transferLostHost(std::vector& tasks, - HostParts& newHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup); - - std::vector> sortedHostsByParts(const HostParts& hostParts); - - nebula::cpp2::ErrorCode getAllSpaces( - std::vector>& spaces); - - ErrorOr buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation = true); - - void simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); - - int32_t acquireLeaders(HostParts& allHostParts, - HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& target, - LeaderBalancePlan& plan, - GraphSpaceID spaceId); 
- - int32_t giveupLeaders(HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& source, - LeaderBalancePlan& plan, - GraphSpaceID spaceId); - - nebula::cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); - - bool checkZoneLegal(const HostAddr& source, const HostAddr& target); - - private: - std::atomic_bool running_{false}; - kvstore::KVStore* kv_{nullptr}; - AdminClient* client_{nullptr}; - // Current running plan. - std::shared_ptr plan_{nullptr}; - std::unique_ptr executor_; - std::atomic_bool inLeaderBalance_{false}; - - // Host => Graph => Partitions - std::unique_ptr hostLeaderMap_; - mutable std::mutex lock_; - - std::unordered_map> hostBounds_; - - // TODO: (darion) nesting map maybe better - std::unordered_map zoneParts_; - std::unordered_map> zoneHosts_; - - // if the space dependent on group, it use to record the partition - // contained in the zone related to the node. - std::unordered_map> relatedParts_; - - bool innerBalance_ = false; -}; - -} // namespace meta -} // namespace nebula - -#endif // META_ADMIN_BALANCER_H_ diff --git a/src/meta/processors/admin/LeaderBalanceProcessor.cpp b/src/meta/processors/admin/LeaderBalanceProcessor.cpp deleted file mode 100644 index 893bdad5d23..00000000000 --- a/src/meta/processors/admin/LeaderBalanceProcessor.cpp +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "meta/processors/admin/LeaderBalanceProcessor.h" - -#include "meta/processors/admin/Balancer.h" - -namespace nebula { -namespace meta { - -void LeaderBalanceProcessor::process(const cpp2::LeaderBalanceReq&) { - auto ret = Balancer::instance(kvstore_)->leaderBalance(); - handleErrorCode(ret); - onFinished(); -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/admin/LeaderBalanceProcessor.h b/src/meta/processors/admin/LeaderBalanceProcessor.h deleted file mode 100644 index be1184bb660..00000000000 --- a/src/meta/processors/admin/LeaderBalanceProcessor.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef META_LEADERCOUNTPROCESSOR_H_ -#define META_LEADERCOUNTPROCESSOR_H_ - -#include - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class LeaderBalanceProcessor : public BaseProcessor { - public: - static LeaderBalanceProcessor* instance(kvstore::KVStore* kvstore) { - return new LeaderBalanceProcessor(kvstore); - } - - void process(const cpp2::LeaderBalanceReq& req); - - private: - explicit LeaderBalanceProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -} // namespace meta -} // namespace nebula - -#endif // META_LEADERCOUNTPROCESSOR_H_ diff --git a/src/meta/processors/job/AdminJobProcessor.cpp b/src/meta/processors/job/AdminJobProcessor.cpp index 3d5cced1324..950a34d18dc 100644 --- a/src/meta/processors/job/AdminJobProcessor.cpp +++ b/src/meta/processors/job/AdminJobProcessor.cpp @@ -111,7 +111,14 @@ void AdminJobProcessor::process(const cpp2::AdminJobReq& req) { break; } case nebula::meta::cpp2::AdminJobOp::RECOVER: { - auto ret = jobMgr->recoverJob(req.get_paras().back()); + const std::vector& paras = req.get_paras(); + const std::string& spaceName = req.get_paras().back(); + std::vector jobIds; + jobIds.reserve(paras.size() - 1); + for (size_t i = 
0; i < paras.size() - 1; i++) { + jobIds.push_back(std::stoi(paras[i])); + } + auto ret = jobMgr->recoverJob(spaceName, adminClient_, jobIds); if (nebula::ok(ret)) { result.set_recovered_job_num(nebula::value(ret)); } else { diff --git a/src/meta/processors/job/BalanceJobExecutor.cpp b/src/meta/processors/job/BalanceJobExecutor.cpp index 42a30065b70..81ee4aa6231 100644 --- a/src/meta/processors/job/BalanceJobExecutor.cpp +++ b/src/meta/processors/job/BalanceJobExecutor.cpp @@ -5,22 +5,40 @@ #include "meta/processors/job/BalanceJobExecutor.h" +#include + #include "common/utils/MetaKeyUtils.h" +#include "kvstore/NebulaStore.h" +#include "meta/processors/job/JobUtils.h" + +DEFINE_double(leader_balance_deviation, + 0.05, + "after leader balance, leader count should in range " + "[avg * (1 - deviation), avg * (1 + deviation)]"); namespace nebula { namespace meta { - +std::atomic_bool BalanceJobExecutor::running_ = false; +std::atomic_bool LeaderBalanceJobExecutor::inLeaderBalance_ = false; +std::unique_ptr DataBalanceJobExecutor::plan_ = nullptr; +std::mutex BalanceJobExecutor::lock_; BalanceJobExecutor::BalanceJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : MetaJobExecutor(jobId, kvstore, adminClient, paras) {} + : MetaJobExecutor(jobId, kvstore, adminClient, paras) { + executor_.reset(new folly::CPUThreadPoolExecutor(1)); + toHost_ = TargetHosts::NONE; +} -bool BalanceJobExecutor::check() { return false; } +bool BalanceJobExecutor::check() { return !paras_.empty(); } nebula::cpp2::ErrorCode BalanceJobExecutor::prepare() { return nebula::cpp2::ErrorCode::SUCCEEDED; } -nebula::cpp2::ErrorCode BalanceJobExecutor::stop() { return nebula::cpp2::ErrorCode::SUCCEEDED; } +nebula::cpp2::ErrorCode BalanceJobExecutor::stop() { + stopped_ = true; + return nebula::cpp2::ErrorCode::SUCCEEDED; +} folly::Future BalanceJobExecutor::executeInternal(HostAddr&& address, std::vector&& parts) { @@ -29,5 +47,1186 @@ folly::Future 
BalanceJobExecutor::executeInternal(HostAddr&& address, return Status::OK(); } +bool BalanceJobExecutor::runInMeta() { return true; } + +nebula::cpp2::ErrorCode BalanceJobExecutor::recovery() { + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::recovery() { + if (kvstore_ == nullptr) { + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + auto* store = static_cast(kvstore_); + if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { + // We need to check whether is leader or not, otherwise we would failed to + // persist state of BalancePlan and BalanceTask, so we just reject request + // if not leader. + return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; + } + auto jobKey = JobDescription::makeJobKey(jobId_); + std::string value; + auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, jobKey, &value); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + auto optJobRet = JobDescription::makeJobDescription(jobKey, value); + auto optJob = nebula::value(optJobRet); + plan_.reset(new BalancePlan(optJob, kvstore_, adminClient_)); + plan_->onFinished_ = [this]() { + std::lock_guard lg(lock_); + if (LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()) != + nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; + } + finishInternal(); + }; + auto recRet = plan_->recovery(); + if (recRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + plan_.reset(nullptr); + return recRet; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode BalanceJobExecutor::finish(bool ret) { + UNUSED(ret); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode BalanceJobExecutor::getAllSpaces( + std::vector>& spaces) { + // Get all spaces + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& 
prefix = MetaKeyUtils::spacePrefix(); + std::unique_ptr iter; + auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get all spaces failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + while (iter->valid()) { + auto spaceId = MetaKeyUtils::spaceId(iter->key()); + auto properties = MetaKeyUtils::parseSpace(iter->val()); + bool zoned = properties.group_name_ref().has_value(); + spaces.emplace_back(spaceId, *properties.replica_factor_ref(), zoned); + iter->next(); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode BalanceJobExecutor::save(const std::string& k, const std::string& v) { + std::vector data{std::make_pair(k, v)}; + folly::Baton baton; + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + kvstore_->asyncMultiPut( + kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { + rc = code; + baton.post(); + }); + baton.wait(); + return rc; +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::buildBalancePlan() { + if (plan_ != nullptr) { + LOG(ERROR) << "Balance plan should be nullptr now"; + return nebula::cpp2::ErrorCode::E_BALANCED; + } + std::vector> spaces; + auto spacesRet = getAllSpaces(spaces); + if (spacesRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't get all spaces"; + return spacesRet; + } + + plan_.reset(new BalancePlan(jobDescription_, kvstore_, adminClient_)); + for (const auto& spaceInfo : spaces) { + auto spaceId = std::get<0>(spaceInfo); + auto spaceReplica = std::get<1>(spaceInfo); + auto dependentOnGroup = std::get<2>(spaceInfo); + LOG(INFO) << "Balance Space " << spaceId; + auto taskRet = genTasks(spaceId, spaceReplica, dependentOnGroup, lostHosts_); + if (!ok(taskRet)) { + LOG(ERROR) << "Generate tasks on space " << std::get<0>(spaceInfo) << " failed"; + return error(taskRet); + } + + auto tasks = std::move(value(taskRet)); + for 
(auto& task : tasks) { + plan_->addTask(std::move(task)); + } + } + + plan_->onFinished_ = [this]() { + std::lock_guard lg(lock_); + if (LastUpdateTimeMan::update(kvstore_, time::WallClock::fastNowInMilliSec()) != + nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; + } + finishInternal(); + }; + if (plan_->tasks_.empty()) { + return nebula::cpp2::ErrorCode::E_BALANCED; + } + return plan_->saveInStore(); +} + +ErrorOr> DataBalanceJobExecutor::genTasks( + GraphSpaceID spaceId, + int32_t spaceReplica, + bool dependentOnGroup, + std::vector& lostHosts) { + HostParts hostParts; + int32_t totalParts = 0; + // hostParts is current part allocation map + auto result = getHostParts(spaceId, dependentOnGroup, hostParts, totalParts); + if (!nebula::ok(result)) { + return nebula::error(result); + } + + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || hostParts.empty()) { + LOG(ERROR) << "Invalid space " << spaceId; + return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; + } + + auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); + if (!nebula::ok(fetchHostPartsRet)) { + LOG(ERROR) << "Fetch hosts and parts failed"; + return nebula::error(fetchHostPartsRet); + } + + auto hostPartsRet = nebula::value(fetchHostPartsRet); + auto confirmedHostParts = hostPartsRet.first; + auto activeHosts = hostPartsRet.second; + LOG(INFO) << "Now, try to balance the confirmedHostParts"; + + // We have two parts need to balance, the first one is parts on lost hosts and + // deleted hosts The seconds one is parts on unbalanced host in + // confirmedHostParts. + std::vector tasks; + // 1. 
Iterate through all hosts that would not be included in + // confirmedHostParts, + // move all parts in them to host with minimum part in confirmedHostParts + for (auto& lostHost : lostHosts) { + auto& lostParts = hostParts[lostHost]; + for (auto& partId : lostParts) { + LOG(INFO) << "Try balance part " << partId << " for lost host " << lostHost; + // check whether any peers which is alive + auto alive = checkReplica(hostParts, activeHosts, spaceReplica, partId); + if (!alive.ok()) { + LOG(ERROR) << "Check Replica failed: " << alive << " Part: " << partId; + return nebula::cpp2::ErrorCode::E_NO_VALID_HOST; + } + + auto retCode = + transferLostHost(tasks, confirmedHostParts, lostHost, spaceId, partId, dependentOnGroup); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; + return retCode; + } + } + } + + // 2. Make all hosts in confirmedHostParts balanced + if (balanceParts(spaceId, confirmedHostParts, totalParts, tasks, dependentOnGroup)) { + return tasks; + } else { + return nebula::cpp2::ErrorCode::E_BAD_BALANCE_PLAN; + } +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::transferLostHost(std::vector& tasks, + HostParts& confirmedHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup) { + // find a host with minimum parts which doesn't have this part + ErrorOr result; + if (dependentOnGroup) { + result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); + } else { + result = hostWithMinimalParts(confirmedHostParts, partId); + } + + if (!nebula::ok(result)) { + LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; + return nebula::error(result); + } + const auto& targetHost = nebula::value(result); + confirmedHostParts[targetHost].emplace_back(partId); + tasks.emplace_back(plan_->id(), spaceId, partId, source, targetHost, kvstore_, adminClient_); + zoneParts_[targetHost].second.emplace_back(partId); + auto zoneIt 
= + std::find(zoneParts_[source].second.begin(), zoneParts_[source].second.end(), partId); + if (zoneIt == zoneParts_[source].second.end()) { + LOG(ERROR) << "part not find " << partId << " at " << source; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +ErrorOr>> +DataBalanceJobExecutor::fetchHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + const HostParts& hostParts, + std::vector& lostHosts) { + ErrorOr> activeHostsRet; + if (dependentOnGroup) { + activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kvstore_, spaceId); + } else { + activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + } + + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + + std::vector expand; + auto activeHosts = nebula::value(activeHostsRet); + calDiff(hostParts, activeHosts, expand, lostHosts); + // confirmedHostParts is new part allocation map after balance, it would + // include newlyAdded and exclude lostHosts + HostParts confirmedHostParts(hostParts); + for (const auto& h : expand) { + LOG(INFO) << "Found new host " << h; + confirmedHostParts.emplace(h, std::vector()); + } + for (const auto& h : lostHosts) { + LOG(INFO) << "Lost host " << h; + confirmedHostParts.erase(h); + } + return std::make_pair(confirmedHostParts, activeHosts); +} + +bool DataBalanceJobExecutor::balanceParts(GraphSpaceID spaceId, + HostParts& confirmedHostParts, + int32_t totalParts, + std::vector& tasks, + bool dependentOnGroup) { + auto avgLoad = static_cast(totalParts) / confirmedHostParts.size(); + VLOG(3) << "The expect avg load is " << avgLoad; + int32_t minLoad = std::floor(avgLoad); + int32_t maxLoad = std::ceil(avgLoad); + VLOG(3) << "The min load is " << minLoad << " max load is " << maxLoad; + + auto sortedHosts = sortedHostsByParts(confirmedHostParts); + if (sortedHosts.empty()) { + LOG(ERROR) << "Host is empty"; + return false; + } + + auto maxPartsHost = sortedHosts.back(); + auto minPartsHost = sortedHosts.front(); + auto& sourceHost = 
maxPartsHost.first; + auto& targetHost = minPartsHost.first; + if (innerBalance_) { + LOG(INFO) << "maxPartsHost.first " << maxPartsHost.first << " minPartsHost.first " + << minPartsHost.first; + while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { + sortedHosts.pop_back(); + maxPartsHost = sortedHosts.back(); + } + + auto& source = maxPartsHost.first; + auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + auto& zoneName = iter->second.first; + int32_t hostsSize = zoneHosts_[zoneName].size(); + int32_t totalPartsZone = 0; + for (auto& host : zoneHosts_[zoneName]) { + auto it = confirmedHostParts.find(host); + if (it == confirmedHostParts.end()) { + LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; + continue; + } + totalPartsZone += it->second.size(); + } + + avgLoad = static_cast(totalPartsZone) / hostsSize; + minLoad = std::floor(avgLoad); + maxLoad = std::ceil(avgLoad); + LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone + << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad + << " The min load is " << minLoad << " max load is " << maxLoad; + } + + while (maxPartsHost.second > maxLoad || minPartsHost.second < minLoad) { + auto& partsFrom = confirmedHostParts[maxPartsHost.first]; + auto& partsTo = confirmedHostParts[minPartsHost.first]; + std::sort(partsFrom.begin(), partsFrom.end()); + std::sort(partsTo.begin(), partsTo.end()); + + LOG(INFO) << maxPartsHost.first << ":" << partsFrom.size() << " -> " << minPartsHost.first + << ":" << partsTo.size(); + std::vector diff; + std::set_difference(partsFrom.begin(), + partsFrom.end(), + partsTo.begin(), + partsTo.end(), + std::inserter(diff, diff.begin())); + bool noAction = true; + for (auto& partId : diff) { + LOG(INFO) << "partsFrom size " << partsFrom.size() << " partsTo size " << partsTo.size() + << " minLoad " << minLoad << " maxLoad " << maxLoad; + if 
(partsFrom.size() == partsTo.size() + 1 || + partsFrom.size() == static_cast(minLoad) || + partsTo.size() == static_cast(maxLoad)) { + VLOG(3) << "No need to move any parts from " << maxPartsHost.first << " to " + << minPartsHost.first; + break; + } + + LOG(INFO) << "[space:" << spaceId << ", part:" << partId << "] " << maxPartsHost.first << "->" + << minPartsHost.first; + auto it = std::find(partsFrom.begin(), partsFrom.end(), partId); + if (it == partsFrom.end()) { + LOG(ERROR) << "Part " << partId << " not found in partsFrom"; + return false; + } + + if (std::find(partsTo.begin(), partsTo.end(), partId) != partsTo.end()) { + LOG(ERROR) << "Part " << partId << " already existed in partsTo"; + return false; + } + + if (dependentOnGroup) { + if (!checkZoneLegal(sourceHost, targetHost)) { + LOG(INFO) << "sourceHost " << sourceHost << " targetHost " << targetHost + << " not same zone"; + + auto& parts = relatedParts_[targetHost]; + auto minIt = std::find(parts.begin(), parts.end(), partId); + if (minIt != parts.end()) { + LOG(INFO) << "Part " << partId << " have existed"; + continue; + } + } + + auto& sourceNoneName = zoneParts_[sourceHost].first; + auto sourceHosts = zoneHosts_.find(sourceNoneName); + for (auto& sh : sourceHosts->second) { + auto& parts = relatedParts_[sh]; + auto maxIt = std::find(parts.begin(), parts.end(), partId); + if (maxIt == parts.end()) { + LOG(INFO) << "Part " << partId << " not found on " << sh; + continue; + } + parts.erase(maxIt); + } + + auto& targetNoneName = zoneParts_[targetHost].first; + auto targetHosts = zoneHosts_.find(targetNoneName); + for (auto& th : targetHosts->second) { + relatedParts_[th].emplace_back(partId); + } + } + + partsFrom.erase(it); + partsTo.emplace_back(partId); + tasks.emplace_back( + jobId_, spaceId, partId, maxPartsHost.first, minPartsHost.first, kvstore_, adminClient_); + noAction = false; + } + + if (noAction) { + LOG(INFO) << "Here is no action"; + break; + } + sortedHosts = 
sortedHostsByParts(confirmedHostParts); + maxPartsHost = sortedHosts.back(); + minPartsHost = sortedHosts.front(); + if (innerBalance_) { + while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { + sortedHosts.pop_back(); + maxPartsHost = sortedHosts.back(); + } + + auto& source = maxPartsHost.first; + auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + auto& zoneName = iter->second.first; + int32_t hostsSize = zoneHosts_[zoneName].size(); + int32_t totalPartsZone = 0; + for (auto& host : zoneHosts_[zoneName]) { + auto it = confirmedHostParts.find(host); + if (it == confirmedHostParts.end()) { + LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; + continue; + } + totalPartsZone += it->second.size(); + } + + avgLoad = static_cast(totalPartsZone) / hostsSize; + minLoad = std::floor(avgLoad); + maxLoad = std::ceil(avgLoad); + LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone + << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad + << " The min load is " << minLoad << " max load is " << maxLoad; + } + } + LOG(INFO) << "Balance tasks num: " << tasks.size(); + for (auto& task : tasks) { + LOG(INFO) << task.taskIdStr(); + } + + relatedParts_.clear(); + return true; +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::stop() { + std::lock_guard lg(lock_); + if (!running_) { + return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; + } + stopped_ = true; + plan_->stop(); + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +ErrorOr BalanceJobExecutor::getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts) { + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaKeyUtils::partPrefix(spaceId); + std::unique_ptr iter; + auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != 
nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << " " + << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + while (iter->valid()) { + auto key = iter->key(); + PartitionID partId; + memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); + auto partHosts = MetaKeyUtils::parsePartVal(iter->val()); + for (auto& ph : partHosts) { + hostParts[ph].emplace_back(partId); + } + totalParts++; + iter->next(); + } + + LOG(INFO) << "Host size: " << hostParts.size(); + auto key = MetaKeyUtils::spaceKey(spaceId); + std::string value; + retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, key, &value); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId + << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto properties = MetaKeyUtils::parseSpace(value); + if (totalParts != properties.get_partition_num()) { + LOG(ERROR) << "Partition number not equals"; + LOG(ERROR) << totalParts << " : " << properties.get_partition_num(); + return false; + } + + int32_t replica = properties.get_replica_factor(); + LOG(INFO) << "Replica " << replica; + if (dependentOnGroup && properties.group_name_ref().has_value()) { + auto groupName = *properties.group_name_ref(); + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + int32_t zoneSize = MetaKeyUtils::parseZoneNames(std::move(groupValue)).size(); + LOG(INFO) << "Zone Size " << zoneSize; + innerBalance_ = (replica == zoneSize); + + auto activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kvstore_, spaceId); + if (!nebula::ok(activeHostsRet)) { + return 
nebula::error(activeHostsRet); + } + + std::vector expand; + auto activeHosts = nebula::value(activeHostsRet); + std::vector lostHosts; + calDiff(hostParts, activeHosts, expand, lostHosts); + // confirmedHostParts is new part allocation map after balance, it would include newlyAdded + // and exclude lostHosts + HostParts confirmedHostParts(hostParts); + for (const auto& h : expand) { + LOG(INFO) << "Found new host " << h; + confirmedHostParts.emplace(h, std::vector()); + } + for (const auto& h : lostHosts) { + LOG(INFO) << "Lost host " << h; + confirmedHostParts.erase(h); + } + + auto zonePartsRet = assembleZoneParts(groupName, confirmedHostParts); + if (zonePartsRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; + return zonePartsRet; + } + } + + totalParts *= replica; + return true; +} + +nebula::cpp2::ErrorCode BalanceJobExecutor::assembleZoneParts(const std::string& groupName, + HostParts& hostParts) { + LOG(INFO) << "Balancer assembleZoneParts"; + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + // zoneHosts use to record this host belong to zone's hosts + std::unordered_map, std::vector> zoneHosts; + auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); + for (const auto& zoneName : zoneNames) { + LOG(INFO) << "Zone Name: " << zoneName; + auto zoneKey = MetaKeyUtils::zoneKey(zoneName); + std::string zoneValue; + retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get zone " << zoneName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto hosts = 
MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); + for (const auto& host : hosts) { + LOG(INFO) << "Host for zone " << host; + auto pair = std::pair(std::move(host), zoneName); + auto& hs = zoneHosts[std::move(pair)]; + hs.insert(hs.end(), hosts.begin(), hosts.end()); + } + } + + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + auto host = it->first; + LOG(INFO) << "Host: " << host; + auto zoneIter = + std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { + return host == pair.first.first; + }); + + if (zoneIter == zoneHosts.end()) { + LOG(INFO) << it->first << " have lost"; + continue; + } + + auto& hosts = zoneIter->second; + auto name = zoneIter->first.second; + zoneHosts_[name] = hosts; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto partIter = hostParts.find(*hostIter); + LOG(INFO) << "Zone " << name << " have the host " << it->first; + if (partIter == hostParts.end()) { + zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); + } else { + zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); + } + } + } + + for (auto it = zoneHosts.begin(); it != zoneHosts.end(); it++) { + auto host = it->first.first; + auto& hosts = it->second; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto h = *hostIter; + auto iter = std::find_if(hostParts.begin(), hostParts.end(), [h](const auto& pair) -> bool { + return h == pair.first; + }); + + if (iter == hostParts.end()) { + continue; + } + + auto& parts = iter->second; + auto& hp = relatedParts_[host]; + hp.insert(hp.end(), parts.begin(), parts.end()); + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +void BalanceJobExecutor::calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& expand, + std::vector& lost) { + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + VLOG(1) << "Original Host " << it->first << ", parts " << it->second.size(); 
+ if (std::find(activeHosts.begin(), activeHosts.end(), it->first) == activeHosts.end() && + std::find(lost.begin(), lost.end(), it->first) == lost.end()) { + lost.emplace_back(it->first); + } + } + for (auto& h : activeHosts) { + VLOG(1) << "Active host " << h; + if (hostParts.find(h) == hostParts.end()) { + expand.emplace_back(h); + } + } +} + +std::vector> DataBalanceJobExecutor::sortedHostsByParts( + const HostParts& hostParts) { + std::vector> hosts; + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + LOG(INFO) << "Host " << it->first << " parts " << it->second.size(); + hosts.emplace_back(it->first, it->second.size()); + } + std::sort(hosts.begin(), hosts.end(), [](const auto& l, const auto& r) { + if (l.second != r.second) { + return l.second < r.second; + } else { + return l.first.host < r.first.host; + } + }); + return hosts; +} + +Status DataBalanceJobExecutor::checkReplica(const HostParts& hostParts, + const std::vector& activeHosts, + int32_t replica, + PartitionID partId) { + // check host hold the part and alive + auto checkPart = [&](const auto& entry) { + auto& host = entry.first; + auto& parts = entry.second; + return std::find(parts.begin(), parts.end(), partId) != parts.end() && + std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); + }; + auto aliveReplica = std::count_if(hostParts.begin(), hostParts.end(), checkPart); + if (aliveReplica >= replica / 2 + 1) { + return Status::OK(); + } + return Status::Error("Not enough alive host hold the part %d", partId); +} + +ErrorOr DataBalanceJobExecutor::hostWithMinimalParts( + const HostParts& hostParts, PartitionID partId) { + auto hosts = sortedHostsByParts(hostParts); + for (auto& h : hosts) { + auto it = hostParts.find(h.first); + if (it == hostParts.end()) { + LOG(ERROR) << "Host " << h.first << " not found"; + return nebula::cpp2::ErrorCode::E_NO_HOSTS; + } + + if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { + return 
h.first; + } + } + return nebula::cpp2::ErrorCode::E_NO_HOSTS; +} + +ErrorOr DataBalanceJobExecutor::hostWithMinimalPartsForZone( + const HostAddr& source, const HostParts& hostParts, PartitionID partId) { + auto hosts = sortedHostsByParts(hostParts); + for (auto& h : hosts) { + auto it = hostParts.find(h.first); + if (it == hostParts.end()) { + LOG(ERROR) << "Host " << h.first << " not found"; + return nebula::cpp2::ErrorCode::E_NO_HOSTS; + } + + LOG(INFO) << "source " << source << " h.first " << h.first; + if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { + return h.first; + } + } + return nebula::cpp2::ErrorCode::E_NO_HOSTS; +} + +nebula::cpp2::ErrorCode BalanceJobExecutor::collectZoneParts(const std::string& groupName, + HostParts& hostParts) { + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + // zoneHosts use to record this host belong to zone's hosts + std::unordered_map, std::vector> zoneHosts; + auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); + for (auto zoneName : zoneNames) { + auto zoneKey = MetaKeyUtils::zoneKey(zoneName); + std::string zoneValue; + retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get zone " << zoneName + << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); + for (const auto& host : hosts) { + auto pair = std::pair(std::move(host), zoneName); + auto& hs = zoneHosts[std::move(pair)]; + hs.insert(hs.end(), hosts.begin(), hosts.end()); + } + } + + for (auto it = 
hostParts.begin(); it != hostParts.end(); it++) { + auto host = it->first; + auto zoneIter = + std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { + return host == pair.first.first; + }); + + if (zoneIter == zoneHosts.end()) { + LOG(INFO) << it->first << " have lost"; + continue; + } + + auto& hosts = zoneIter->second; + auto name = zoneIter->first.second; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto partIter = hostParts.find(*hostIter); + if (partIter == hostParts.end()) { + zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); + } else { + zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); + } + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +bool DataBalanceJobExecutor::checkZoneLegal(const HostAddr& source, const HostAddr& target) { + VLOG(3) << "Check " << source << " : " << target; + auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + if (sourceIter == zoneParts_.end()) { + LOG(INFO) << "Source " << source << " not found"; + return false; + } + + auto targetIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&target](const auto& pair) { + return target == pair.first; + }); + + if (targetIter == zoneParts_.end()) { + LOG(INFO) << "Target " << target << " not found"; + return false; + } + + LOG(INFO) << sourceIter->second.first << " : " << targetIter->second.first; + return sourceIter->second.first == targetIter->second.first; +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::prepare() { + auto activeHostsRet = ActiveHostsMan::getActiveHosts(kvstore_); + if (!nebula::ok(activeHostsRet)) { + auto retCode = nebula::error(activeHostsRet); + LOG(ERROR) << "Get active hosts failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + auto hosts = std::move(nebula::value(activeHostsRet)); + + if (hosts.empty()) { + LOG(ERROR) << "There is 
no active hosts"; + return nebula::cpp2::ErrorCode::E_NO_HOSTS; + } + lostHosts_.reserve(paras_.size() - 1); + for (size_t i = 0; i < paras_.size() - 1; i++) { + lostHosts_.emplace_back(HostAddr::fromString(paras_[i])); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::finish(bool ret) { + std::lock_guard lg(lock_); + return finishInternal(ret); +} + +nebula::cpp2::ErrorCode DataBalanceJobExecutor::finishInternal(bool ret) { + CHECK(!lock_.try_lock()); + plan_.reset(nullptr); + running_ = false; + auto rc = onFinished_(ret); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + return rc; + } + return ret ? nebula::cpp2::ErrorCode::SUCCEEDED : nebula::cpp2::ErrorCode::E_BALANCER_FAILURE; +} + +folly::Future DataBalanceJobExecutor::executeInternal(HostAddr&& address, + std::vector&& parts) { + UNUSED(address); + UNUSED(parts); + std::unique_lock lg(lock_); + if (!running_) { + if (plan_ == nullptr) { + auto retCode = buildBalancePlan(); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + if (retCode == nebula::cpp2::ErrorCode::E_BALANCED) { + finishInternal(true); + return Status::OK(); + } else { + return Status::Error(apache::thrift::util::enumNameSafe(retCode)); + } + } + } + LOG(INFO) << "Start to invoke balance plan " << plan_->id(); + running_ = true; + auto fut = folly::via(executor_.get(), std::bind(&BalancePlan::invoke, plan_.get())); + lg.unlock(); + fut.wait(); + return Status::OK(); + } + CHECK(plan_ != nullptr); + LOG(INFO) << "Balance job " << plan_->id() << " is still running"; + return Status::Error(folly::sformat("Balance job {} is still running", plan_->id())); +} + +folly::Future LeaderBalanceJobExecutor::executeInternal(HostAddr&& address, + std::vector&& parts) { + UNUSED(address); + UNUSED(parts); + if (running_.load(std::memory_order_acquire)) { + LOG(INFO) << "Balance process still running"; + return Status::OK(); + } + + folly::Promise promise; + auto future = promise.getFuture(); + // 
Space ID, Replica Factor and Dependent On Group + std::vector> spaces; + auto ret = getAllSpaces(spaces); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + if (ret != nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + ret = nebula::cpp2::ErrorCode::E_STORE_FAILURE; + } + return Status::Error("Can't get spaces"); + } + + bool expected = false; + if (inLeaderBalance_.compare_exchange_strong(expected, true)) { + hostLeaderMap_.reset(new HostLeaderMap); + auto status = adminClient_->getLeaderDist(hostLeaderMap_.get()).get(); + if (!status.ok() || hostLeaderMap_->empty()) { + inLeaderBalance_ = false; + return Status::Error("Get leader distribution failed"); + } + + std::vector> futures; + for (const auto& spaceInfo : spaces) { + auto spaceId = std::get<0>(spaceInfo); + auto replicaFactor = std::get<1>(spaceInfo); + auto dependentOnGroup = std::get<2>(spaceInfo); + LeaderBalancePlan plan; + auto balanceResult = buildLeaderBalancePlan( + hostLeaderMap_.get(), spaceId, replicaFactor, dependentOnGroup, plan); + if (!nebula::ok(balanceResult) || !nebula::value(balanceResult)) { + LOG(ERROR) << "Building leader balance plan failed " + << "Space: " << spaceId; + continue; + } + simplifyLeaderBalnacePlan(spaceId, plan); + for (const auto& task : plan) { + futures.emplace_back(adminClient_->transLeader(std::get<0>(task), + std::get<1>(task), + std::move(std::get<2>(task)), + std::move(std::get<3>(task)))); + } + } + + int32_t failed = 0; + folly::collectAll(futures) + .via(executor_.get()) + .thenTry([&](const auto& result) { + auto tries = result.value(); + for (const auto& t : tries) { + if (!t.value().ok()) { + ++failed; + } + } + }) + .wait(); + + inLeaderBalance_ = false; + if (failed != 0) { + LOG(ERROR) << failed << " partiton failed to transfer leader"; + } + onFinished_(false); + return Status::Error("partiton failed to transfer leader"); + } + onFinished_(true); + return Status::OK(); +} + +ErrorOr LeaderBalanceJobExecutor::buildLeaderBalancePlan( + HostLeaderMap* 
hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation) { + PartAllocation peersMap; + HostParts leaderHostParts; + size_t leaderParts = 0; + // store peers of all paritions in peerMap + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaKeyUtils::partPrefix(spaceId); + std::unique_ptr iter; + auto retCode = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << static_cast(retCode); + return retCode; + } + + while (iter->valid()) { + auto key = iter->key(); + PartitionID partId; + memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); + auto peers = MetaKeyUtils::parsePartVal(iter->val()); + peersMap[partId] = std::move(peers); + ++leaderParts; + iter->next(); + } + + int32_t totalParts = 0; + HostParts allHostParts; + auto result = getHostParts(spaceId, dependentOnGroup, allHostParts, totalParts); + if (!nebula::ok(result)) { + return nebula::error(result); + } else { + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || allHostParts.empty()) { + LOG(ERROR) << "Invalid space " << spaceId; + return false; + } + } + + std::unordered_set activeHosts; + for (const auto& host : *hostLeaderMap) { + // only balance leader between hosts which have valid partition + if (!allHostParts[host.first].empty()) { + activeHosts.emplace(host.first); + leaderHostParts[host.first] = (*hostLeaderMap)[host.first][spaceId]; + } + } + + if (activeHosts.empty()) { + LOG(ERROR) << "No active hosts"; + return false; + } + + if (dependentOnGroup) { + for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { + auto min = it->second.size() / replicaFactor; + VLOG(3) << "Host: " << it->first << " Bounds: " << min << " : " << min + 1; + hostBounds_[it->first] = std::make_pair(min, min + 1); + } + } else 
{ + size_t activeSize = activeHosts.size(); + size_t globalAvg = leaderParts / activeSize; + size_t globalMin = globalAvg; + size_t globalMax = globalAvg; + if (leaderParts % activeSize != 0) { + globalMax += 1; + } + + if (useDeviation) { + globalMin = std::ceil(static_cast(leaderParts) / activeSize * + (1 - FLAGS_leader_balance_deviation)); + globalMax = std::floor(static_cast(leaderParts) / activeSize * + (1 + FLAGS_leader_balance_deviation)); + } + VLOG(3) << "Build leader balance plan, expected min load: " << globalMin + << ", max load: " << globalMax << " avg: " << globalAvg; + + for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { + hostBounds_[it->first] = std::make_pair(globalMin, globalMax); + } + } + + while (true) { + int32_t taskCount = 0; + bool hasUnbalancedHost = false; + for (const auto& hostEntry : leaderHostParts) { + auto host = hostEntry.first; + auto& hostMinLoad = hostBounds_[host].first; + auto& hostMaxLoad = hostBounds_[host].second; + int32_t partSize = hostEntry.second.size(); + if (hostMinLoad <= partSize && partSize <= hostMaxLoad) { + VLOG(3) << partSize << " is between min load " << hostMinLoad << " and max load " + << hostMaxLoad; + continue; + } + + hasUnbalancedHost = true; + if (partSize < hostMinLoad) { + // need to acquire leader from other hosts + LOG(INFO) << "Acquire leaders to host: " << host << " loading: " << partSize + << " min loading " << hostMinLoad; + taskCount += acquireLeaders( + allHostParts, leaderHostParts, peersMap, activeHosts, host, plan, spaceId); + } else { + // need to transfer leader to other hosts + LOG(INFO) << "Giveup leaders from host: " << host << " loading: " << partSize + << " max loading " << hostMaxLoad; + taskCount += giveupLeaders(leaderHostParts, peersMap, activeHosts, host, plan, spaceId); + } + } + + // If every host is balanced or no more task during this loop, then the plan + // is done + if (!hasUnbalancedHost || taskCount == 0) { + LOG(INFO) << "Not need balance"; + 
break; + } + } + return true; +} + +int32_t LeaderBalanceJobExecutor::acquireLeaders(HostParts& allHostParts, + HostParts& leaderHostParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& target, + LeaderBalancePlan& plan, + GraphSpaceID spaceId) { + // host will loop for the partition which is not leader, and try to acuire the + // leader + int32_t taskCount = 0; + std::vector diff; + std::set_difference(allHostParts[target].begin(), + allHostParts[target].end(), + leaderHostParts[target].begin(), + leaderHostParts[target].end(), + std::back_inserter(diff)); + auto& targetLeaders = leaderHostParts[target]; + size_t minLoad = hostBounds_[target].first; + for (const auto& partId : diff) { + VLOG(3) << "Try acquire leader for part " << partId; + // find the leader of partId + auto sources = peersMap[partId]; + for (const auto& source : sources) { + if (source == target || !activeHosts.count(source)) { + continue; + } + + // if peer is the leader of partId and can transfer, then transfer it to + // host + auto& sourceLeaders = leaderHostParts[source]; + VLOG(3) << "Check peer: " << source << " min load: " << minLoad + << " peerLeaders size: " << sourceLeaders.size(); + auto it = std::find(sourceLeaders.begin(), sourceLeaders.end(), partId); + if (it != sourceLeaders.end() && minLoad < sourceLeaders.size()) { + sourceLeaders.erase(it); + targetLeaders.emplace_back(partId); + plan.emplace_back(spaceId, partId, source, target); + LOG(INFO) << "acquire plan trans leader space: " << spaceId << " part: " << partId + << " from " << source.host << ":" << source.port << " to " << target.host << ":" + << target.port; + ++taskCount; + break; + } + } + + // if host has enough leader, just return + if (targetLeaders.size() == minLoad) { + LOG(INFO) << "Host: " << target << "'s leader reach " << minLoad; + break; + } + } + return taskCount; +} + +int32_t LeaderBalanceJobExecutor::giveupLeaders(HostParts& leaderParts, + PartAllocation& peersMap, + 
std::unordered_set& activeHosts, + const HostAddr& source, + LeaderBalancePlan& plan, + GraphSpaceID spaceId) { + int32_t taskCount = 0; + auto& sourceLeaders = leaderParts[source]; + size_t maxLoad = hostBounds_[source].second; + + // host will try to transfer the extra leaders to other peers + for (auto it = sourceLeaders.begin(); it != sourceLeaders.end();) { + // find the leader of partId + auto partId = *it; + const auto& targets = peersMap[partId]; + bool isErase = false; + + // leader should move to the peer with lowest loading + auto target = + std::min_element(targets.begin(), targets.end(), [&](const auto& l, const auto& r) -> bool { + if (source == l || !activeHosts.count(l)) { + return false; + } + return leaderParts[l].size() < leaderParts[r].size(); + }); + + // If peer can accept this partition leader, than host will transfer to the + // peer + if (target != targets.end()) { + auto& targetLeaders = leaderParts[*target]; + int32_t targetLeaderSize = targetLeaders.size(); + if (targetLeaderSize < hostBounds_[*target].second) { + it = sourceLeaders.erase(it); + targetLeaders.emplace_back(partId); + plan.emplace_back(spaceId, partId, source, *target); + LOG(INFO) << "giveup plan trans leader space: " << spaceId << " part: " << partId + << " from " << source.host << ":" << source.port << " to " << target->host << ":" + << target->port; + ++taskCount; + isErase = true; + } + } + + // if host has enough leader, just return + if (sourceLeaders.size() == maxLoad) { + LOG(INFO) << "Host: " << source << "'s leader reach " << maxLoad; + break; + } + + if (!isErase) { + ++it; + } + } + return taskCount; +} + +void LeaderBalanceJobExecutor::simplifyLeaderBalnacePlan(GraphSpaceID spaceId, + LeaderBalancePlan& plan) { + std::unordered_map buckets; + for (auto& task : plan) { + buckets[std::get<1>(task)].emplace_back(task); + } + plan.clear(); + for (const auto& partEntry : buckets) { + plan.emplace_back(spaceId, + partEntry.first, + 
std::get<2>(partEntry.second.front()), + std::get<3>(partEntry.second.back())); + } +} + } // namespace meta } // namespace nebula diff --git a/src/meta/processors/job/BalanceJobExecutor.h b/src/meta/processors/job/BalanceJobExecutor.h index 6f816b05955..52440bf6eaf 100644 --- a/src/meta/processors/job/BalanceJobExecutor.h +++ b/src/meta/processors/job/BalanceJobExecutor.h @@ -6,20 +6,22 @@ #ifndef META_BALANCEJOBEXECUTOR_H_ #define META_BALANCEJOBEXECUTOR_H_ -#include "meta/processors/admin/BalancePlan.h" -#include "meta/processors/admin/BalanceTask.h" +#include "meta/processors/job/BalancePlan.h" +#include "meta/processors/job/BalanceTask.h" #include "meta/processors/job/SimpleConcurrentJobExecutor.h" namespace nebula { namespace meta { -using HostParts = std::unordered_map>; using ZoneParts = std::pair>; +using HostParts = std::unordered_map>; +using PartAllocation = std::unordered_map>; +using LeaderBalancePlan = std::vector>; +using ZoneNameAndParts = std::pair>; -/* - * BalanceJobExecutor is use to balance data between hosts. 
- */ class BalanceJobExecutor : public MetaJobExecutor { + friend void testRestBlancer(); + public: BalanceJobExecutor(JobID jobId, kvstore::KVStore* kvstore, @@ -32,9 +34,199 @@ class BalanceJobExecutor : public MetaJobExecutor { nebula::cpp2::ErrorCode stop() override; + nebula::cpp2::ErrorCode finish(bool ret = true) override; + + folly::Future executeInternal(HostAddr&& address, + std::vector&& parts) override; + + bool runInMeta() override; + + nebula::cpp2::ErrorCode recovery() override; + + protected: + nebula::cpp2::ErrorCode getAllSpaces( + std::vector>& spaces); + + ErrorOr getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts); + + void calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& expand, + std::vector& lost); + + nebula::cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); + + nebula::cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); + + nebula::cpp2::ErrorCode save(const std::string& k, const std::string& v); + + protected: + static std::atomic_bool running_; + static std::mutex lock_; + bool innerBalance_ = false; + std::unique_ptr executor_; + std::unordered_map zoneParts_; + std::unordered_map> zoneHosts_; + std::unordered_map> relatedParts_; +}; + +class DataBalanceJobExecutor : public BalanceJobExecutor { + FRIEND_TEST(BalanceTest, BalancePartsTest); + FRIEND_TEST(BalanceTest, NormalTest); + FRIEND_TEST(BalanceTest, SimpleTestWithZone); + FRIEND_TEST(BalanceTest, SpecifyHostTest); + FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); + FRIEND_TEST(BalanceTest, MockReplaceMachineTest); + FRIEND_TEST(BalanceTest, SingleReplicaTest); + FRIEND_TEST(BalanceTest, TryToRecoveryTest); + FRIEND_TEST(BalanceTest, RecoveryTest); + FRIEND_TEST(BalanceTest, StopPlanTest); + FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); + 
FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceTest); + FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); + FRIEND_TEST(BalanceTest, ExpansionZoneTest); + FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); + FRIEND_TEST(BalanceTest, ShrinkZoneTest); + FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); + FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); + FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); + FRIEND_TEST(BalanceIntegrationTest, BalanceTest); + friend void testRestBlancer(); + + public: + DataBalanceJobExecutor(JobDescription jobDescription, + kvstore::KVStore* kvstore, + AdminClient* adminClient, + const std::vector& params) + : BalanceJobExecutor(jobDescription.getJobId(), kvstore, adminClient, params), + jobDescription_(jobDescription) {} + nebula::cpp2::ErrorCode recovery() override; + nebula::cpp2::ErrorCode prepare() override; + nebula::cpp2::ErrorCode finish(bool ret = true) override; + nebula::cpp2::ErrorCode stop() override; + + protected: + folly::Future executeInternal(HostAddr&& address, + std::vector&& parts) override; + nebula::cpp2::ErrorCode buildBalancePlan(); + ErrorOr> genTasks( + GraphSpaceID spaceId, + int32_t spaceReplica, + bool dependentOnGroup, + std::vector& lostHosts); + ErrorOr hostWithMinimalParts(const HostParts& hostParts, + PartitionID partId); + + ErrorOr hostWithMinimalPartsForZone(const HostAddr& source, + const HostParts& hostParts, + PartitionID partId); + bool balanceParts(GraphSpaceID spaceId, + HostParts& confirmedHostParts, + int32_t totalParts, + std::vector& tasks, + bool dependentOnGroup); + ErrorOr>> fetchHostParts( + GraphSpaceID spaceId, + bool dependentOnGroup, + const HostParts& hostParts, + 
std::vector& lostHosts); + nebula::cpp2::ErrorCode transferLostHost(std::vector& tasks, + HostParts& confirmedHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup); + Status checkReplica(const HostParts& hostParts, + const std::vector& activeHosts, + int32_t replica, + PartitionID partId); + std::vector> sortedHostsByParts(const HostParts& hostParts); + bool checkZoneLegal(const HostAddr& source, const HostAddr& target); + nebula::cpp2::ErrorCode finishInternal(bool ret = true); + + private: + static std::unique_ptr plan_; + std::vector lostHosts_; + JobDescription jobDescription_; +}; + +class LeaderBalanceJobExecutor : public BalanceJobExecutor { + FRIEND_TEST(BalanceTest, BalancePartsTest); + FRIEND_TEST(BalanceTest, NormalTest); + FRIEND_TEST(BalanceTest, SimpleTestWithZone); + FRIEND_TEST(BalanceTest, SpecifyHostTest); + FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); + FRIEND_TEST(BalanceTest, MockReplaceMachineTest); + FRIEND_TEST(BalanceTest, SingleReplicaTest); + FRIEND_TEST(BalanceTest, TryToRecoveryTest); + FRIEND_TEST(BalanceTest, RecoveryTest); + FRIEND_TEST(BalanceTest, StopPlanTest); + FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceTest); + FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); + FRIEND_TEST(BalanceTest, ExpansionZoneTest); + FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); + FRIEND_TEST(BalanceTest, ShrinkZoneTest); + FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); + FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); + FRIEND_TEST(BalanceIntegrationTest, 
LeaderBalanceTest); + FRIEND_TEST(BalanceIntegrationTest, BalanceTest); + friend void testRestBlancer(); + + public: + LeaderBalanceJobExecutor(JobID jobId, + kvstore::KVStore* kvstore, + AdminClient* adminClient, + const std::vector& params) + : BalanceJobExecutor(jobId, kvstore, adminClient, params) {} + protected: folly::Future executeInternal(HostAddr&& address, std::vector&& parts) override; + + ErrorOr buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation = true); + + int32_t acquireLeaders(HostParts& allHostParts, + HostParts& leaderHostParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& target, + LeaderBalancePlan& plan, + GraphSpaceID spaceId); + + int32_t giveupLeaders(HostParts& leaderParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& source, + LeaderBalancePlan& plan, + GraphSpaceID spaceId); + + void simplifyLeaderBalnacePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); + + private: + static std::atomic_bool inLeaderBalance_; + std::unique_ptr hostLeaderMap_; + std::unordered_map> hostBounds_; }; } // namespace meta diff --git a/src/meta/processors/admin/BalancePlan.cpp b/src/meta/processors/job/BalancePlan.cpp similarity index 69% rename from src/meta/processors/admin/BalancePlan.cpp rename to src/meta/processors/job/BalancePlan.cpp index 66b1a977197..db1533f0f31 100644 --- a/src/meta/processors/admin/BalancePlan.cpp +++ b/src/meta/processors/job/BalancePlan.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#include "meta/processors/admin/BalancePlan.h" +#include "meta/processors/job/BalancePlan.h" #include @@ -41,8 +41,8 @@ void BalancePlan::dispatchTasks() { } void BalancePlan::invoke() { - status_ = BalanceStatus::IN_PROGRESS; // Sort the tasks by its id to ensure the order after recovery. 
+ setStatus(meta::cpp2::JobStatus::RUNNING); std::sort( tasks_.begin(), tasks_.end(), [](auto& l, auto& r) { return l.taskIdStr() < r.taskIdStr(); }); dispatchTasks(); @@ -55,12 +55,12 @@ void BalancePlan::invoke() { { std::lock_guard lg(lock_); finishedTaskNum_++; - VLOG(1) << "Balance " << id_ << " has completed " << finishedTaskNum_ << " task"; + VLOG(1) << "Balance " << id() << " has completed " << finishedTaskNum_ << " task"; if (finishedTaskNum_ == tasks_.size()) { finished = true; - if (status_ == BalanceStatus::IN_PROGRESS) { - status_ = BalanceStatus::SUCCEEDED; - LOG(INFO) << "Balance " << id_ << " succeeded!"; + if (status() == meta::cpp2::JobStatus::RUNNING) { + setStatus(meta::cpp2::JobStatus::FINISHED); + LOG(INFO) << "Balance " << id() << " succeeded!"; } } stopped = stopped_; @@ -83,17 +83,16 @@ void BalancePlan::invoke() { { std::lock_guard lg(lock_); finishedTaskNum_++; - VLOG(1) << "Balance " << id_ << " has completed " << finishedTaskNum_ << " task"; - status_ = BalanceStatus::FAILED; + VLOG(1) << "Balance " << id() << " has completed " << finishedTaskNum_ << " task"; + setStatus(meta::cpp2::JobStatus::FAILED); if (finishedTaskNum_ == tasks_.size()) { finished = true; - LOG(INFO) << "Balance " << id_ << " failed!"; + LOG(INFO) << "Balance " << id() << " failed!"; } stopped = stopped_; } if (finished) { CHECK_EQ(j, this->buckets_[i].size() - 1); - saveInStore(true); onFinished_(); } else if (j + 1 < this->buckets_[i].size()) { auto& task = this->tasks_[this->buckets_[i][j + 1]]; @@ -122,11 +121,11 @@ void BalancePlan::invoke() { nebula::cpp2::ErrorCode BalancePlan::saveInStore(bool onlyPlan) { CHECK_NOTNULL(kv_); std::vector data; - data.emplace_back(MetaKeyUtils::balancePlanKey(id_), MetaKeyUtils::balancePlanVal(status_)); + data.emplace_back(jobDescription_.jobKey(), jobDescription_.jobVal()); if (!onlyPlan) { for (auto& task : tasks_) { data.emplace_back(MetaKeyUtils::balanceTaskKey( - task.balanceId_, task.spaceId_, task.partId_, task.src_, 
task.dst_), + task.jobId_, task.spaceId_, task.partId_, task.src_, task.dst_), MetaKeyUtils::balanceTaskVal( task.status_, task.ret_, task.startTimeMs_, task.endTimeMs_)); } @@ -148,28 +147,63 @@ nebula::cpp2::ErrorCode BalancePlan::saveInStore(bool onlyPlan) { return ret; } -nebula::cpp2::ErrorCode BalancePlan::recovery(bool resume) { - CHECK_NOTNULL(kv_); - const auto& prefix = MetaKeyUtils::balanceTaskPrefix(id_); +ErrorOr> BalancePlan::show( + JobID jobId, kvstore::KVStore* kv, AdminClient* client) { + auto ret = getBalanceTasks(jobId, kv, client, true); + if (!ok(ret)) { + return error(ret); + } + std::vector tasks = value(ret); + std::vector thriftTasks; + for (auto& task : tasks) { + cpp2::BalanceTask t; + t.set_id(task.taskIdStr()); + t.set_command(task.taskCommandStr()); + switch (task.result()) { + case BalanceTaskResult::SUCCEEDED: + t.set_result(cpp2::TaskResult::SUCCEEDED); + break; + case BalanceTaskResult::FAILED: + t.set_result(cpp2::TaskResult::FAILED); + break; + case BalanceTaskResult::IN_PROGRESS: + t.set_result(cpp2::TaskResult::IN_PROGRESS); + break; + case BalanceTaskResult::INVALID: + t.set_result(cpp2::TaskResult::INVALID); + break; + } + t.set_start_time(task.startTime()); + t.set_stop_time(task.endTime()); + thriftTasks.emplace_back(std::move(t)); + } + return thriftTasks; +} + +ErrorOr> BalancePlan::getBalanceTasks( + JobID jobId, kvstore::KVStore* kv, AdminClient* client, bool resume) { + CHECK_NOTNULL(kv); + const auto& prefix = MetaKeyUtils::balanceTaskPrefix(jobId); std::unique_ptr iter; - auto ret = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + auto ret = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Can't access kvstore, ret = " << static_cast(ret); return ret; } - + std::vector tasks; while (iter->valid()) { BalanceTask task; - task.kv_ = kv_; - task.client_ = client_; + task.kv_ = kv; + task.client_ = client; { auto tup = 
MetaKeyUtils::parseBalanceTaskKey(iter->key()); - task.balanceId_ = std::get<0>(tup); + task.jobId_ = std::get<0>(tup); task.spaceId_ = std::get<1>(tup); task.partId_ = std::get<2>(tup); task.src_ = std::get<3>(tup); task.dst_ = std::get<4>(tup); task.taskIdStr_ = task.buildTaskId(); + task.commandStr_ = task.buildCommand(); } { auto tup = MetaKeyUtils::parseBalanceTaskVal(iter->val()); @@ -183,7 +217,7 @@ nebula::cpp2::ErrorCode BalancePlan::recovery(bool resume) { task.ret_ = BalanceTaskResult::IN_PROGRESS; } task.status_ = BalanceTaskStatus::START; - auto activeHostRet = ActiveHostsMan::isLived(kv_, task.dst_); + auto activeHostRet = ActiveHostsMan::isLived(kv, task.dst_); if (!nebula::ok(activeHostRet)) { auto retCode = nebula::error(activeHostRet); LOG(ERROR) << "Get active hosts failed, error: " << static_cast(retCode); @@ -197,10 +231,20 @@ nebula::cpp2::ErrorCode BalancePlan::recovery(bool resume) { } } } - tasks_.emplace_back(std::move(task)); + tasks.emplace_back(std::move(task)); iter->next(); } - return nebula::cpp2::ErrorCode::SUCCEEDED; + return tasks; +} + +nebula::cpp2::ErrorCode BalancePlan::recovery(bool resume) { + auto ret = getBalanceTasks(id(), kv_, client_, resume); + if (!ok(ret)) { + return error(ret); + } + tasks_ = value(ret); + return tasks_.empty() ? 
nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND + : nebula::cpp2::ErrorCode::SUCCEEDED; } } // namespace meta diff --git a/src/meta/processors/admin/BalancePlan.h b/src/meta/processors/job/BalancePlan.h similarity index 66% rename from src/meta/processors/admin/BalancePlan.h rename to src/meta/processors/job/BalancePlan.h index 0169373c640..2dc9b969e52 100644 --- a/src/meta/processors/admin/BalancePlan.h +++ b/src/meta/processors/job/BalancePlan.h @@ -10,13 +10,15 @@ #include "kvstore/KVStore.h" #include "meta/processors/Common.h" -#include "meta/processors/admin/BalanceTask.h" +#include "meta/processors/job/BalanceTask.h" +#include "meta/processors/job/JobDescription.h" namespace nebula { namespace meta { class BalancePlan { friend class Balancer; + friend class DataBalanceJobExecutor; FRIEND_TEST(BalanceTest, BalancePlanTest); FRIEND_TEST(BalanceTest, NormalTest); FRIEND_TEST(BalanceTest, SpecifyHostTest); @@ -29,16 +31,15 @@ class BalancePlan { FRIEND_TEST(BalanceTest, StopPlanTest); public: - BalancePlan(BalanceID id, kvstore::KVStore* kv, AdminClient* client) - : id_(id), kv_(kv), client_(client) {} + BalancePlan(JobDescription jobDescription, kvstore::KVStore* kv, AdminClient* client) + : jobDescription_(jobDescription), kv_(kv), client_(client) {} BalancePlan(const BalancePlan& plan) - : id_(plan.id_), + : jobDescription_(plan.jobDescription_), kv_(plan.kv_), client_(plan.client_), tasks_(plan.tasks_), - finishedTaskNum_(plan.finishedTaskNum_), - status_(plan.status_) {} + finishedTaskNum_(plan.finishedTaskNum_) {} void addTask(BalanceTask task) { tasks_.emplace_back(std::move(task)); } @@ -53,11 +54,13 @@ class BalancePlan { * */ void rollback() {} - BalanceStatus status() { return status_; } + meta::cpp2::JobStatus status() { return jobDescription_.getStatus(); } + + void setStatus(meta::cpp2::JobStatus status) { jobDescription_.setStatus(status); } nebula::cpp2::ErrorCode saveInStore(bool onlyPlan = false); - BalanceID id() const { return id_; } + JobID id() 
const { return jobDescription_.getJobId(); } const std::vector& tasks() const { return tasks_; } @@ -68,20 +71,25 @@ class BalancePlan { stopped_ = true; } - private: nebula::cpp2::ErrorCode recovery(bool resume = true); void dispatchTasks(); + static ErrorOr> getBalanceTasks( + JobID jobId, kvstore::KVStore* kv, AdminClient* client, bool resume = true); + + static ErrorOr> show(JobID jobId, + kvstore::KVStore* kv, + AdminClient* client); + private: - BalanceID id_ = 0; + JobDescription jobDescription_; kvstore::KVStore* kv_ = nullptr; AdminClient* client_ = nullptr; std::vector tasks_; std::mutex lock_; size_t finishedTaskNum_ = 0; std::function onFinished_; - BalanceStatus status_ = BalanceStatus::NOT_START; bool stopped_ = false; // List of task index in tasks_; diff --git a/src/meta/processors/admin/BalanceTask.cpp b/src/meta/processors/job/BalanceTask.cpp similarity index 96% rename from src/meta/processors/admin/BalanceTask.cpp rename to src/meta/processors/job/BalanceTask.cpp index 42f2bec3ba1..49653bf8b78 100644 --- a/src/meta/processors/admin/BalanceTask.cpp +++ b/src/meta/processors/job/BalanceTask.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#include "meta/processors/admin/BalanceTask.h" +#include "meta/processors/job/BalanceTask.h" #include @@ -24,7 +24,7 @@ void BalanceTask::invoke() { // once. 
CHECK_NOTNULL(client_); if (ret_ == BalanceTaskResult::INVALID) { - endTimeMs_ = time::WallClock::fastNowInMilliSec(); + endTimeMs_ = time::WallClock::fastNowInSec(); saveInStore(); LOG(ERROR) << taskIdStr_ << " Task invalid, status " << static_cast(status_); // When a plan is stopped or dst is not alive any more, a task will be @@ -33,7 +33,7 @@ void BalanceTask::invoke() { onFinished_(); return; } else if (ret_ == BalanceTaskResult::FAILED) { - endTimeMs_ = time::WallClock::fastNowInMilliSec(); + endTimeMs_ = time::WallClock::fastNowInSec(); saveInStore(); LOG(ERROR) << taskIdStr_ << " Task failed, status " << static_cast(status_); onError_(); @@ -46,7 +46,7 @@ void BalanceTask::invoke() { case BalanceTaskStatus::START: { LOG(INFO) << taskIdStr_ << " Start to move part, check the peers firstly!"; ret_ = BalanceTaskResult::IN_PROGRESS; - startTimeMs_ = time::WallClock::fastNowInMilliSec(); + startTimeMs_ = time::WallClock::fastNowInSec(); SAVE_STATE(); client_->checkPeers(spaceId_, partId_).thenValue([this](auto&& resp) { if (!resp.ok()) { @@ -235,7 +235,7 @@ void BalanceTask::rollback() { bool BalanceTask::saveInStore() { CHECK_NOTNULL(kv_); std::vector data; - data.emplace_back(MetaKeyUtils::balanceTaskKey(balanceId_, spaceId_, partId_, src_, dst_), + data.emplace_back(MetaKeyUtils::balanceTaskKey(jobId_, spaceId_, partId_, src_, dst_), MetaKeyUtils::balanceTaskVal(status_, ret_, startTimeMs_, endTimeMs_)); folly::Baton baton; bool ret = true; diff --git a/src/meta/processors/admin/BalanceTask.h b/src/meta/processors/job/BalanceTask.h similarity index 80% rename from src/meta/processors/admin/BalanceTask.h rename to src/meta/processors/job/BalanceTask.h index 7a647d12e5d..5add8dc1c45 100644 --- a/src/meta/processors/admin/BalanceTask.h +++ b/src/meta/processors/job/BalanceTask.h @@ -34,14 +34,14 @@ class BalanceTask { public: BalanceTask() = default; - BalanceTask(BalanceID balanceId, + BalanceTask(JobID jobId, GraphSpaceID spaceId, PartitionID partId, const 
HostAddr& src, const HostAddr& dst, kvstore::KVStore* kv, AdminClient* client) - : balanceId_(balanceId), + : jobId_(jobId), spaceId_(spaceId), partId_(partId), src_(src), @@ -52,6 +52,8 @@ class BalanceTask { const std::string& taskIdStr() const { return taskIdStr_; } + const std::string& taskCommandStr() const { return commandStr_; } + void invoke(); void rollback(); @@ -59,26 +61,27 @@ class BalanceTask { BalanceTaskResult result() const { return ret_; } private: - std::string buildTaskId() { - return folly::stringPrintf("[%ld, %d:%d, %s:%d->%s:%d]", - balanceId_, - spaceId_, - partId_, - src_.host.c_str(), - src_.port, - dst_.host.c_str(), - dst_.port); + std::string buildTaskId() { return folly::stringPrintf("%d, %d:%d", jobId_, spaceId_, partId_); } + + std::string buildCommand() { + return folly::stringPrintf( + "%s:%d->%s:%d", src_.host.c_str(), src_.port, dst_.host.c_str(), dst_.port); } bool saveInStore(); + int64_t startTime() const { return startTimeMs_; } + + int64_t endTime() const { return endTimeMs_; } + private: - BalanceID balanceId_; + JobID jobId_; GraphSpaceID spaceId_; PartitionID partId_; HostAddr src_; HostAddr dst_; std::string taskIdStr_; + std::string commandStr_; kvstore::KVStore* kv_ = nullptr; AdminClient* client_ = nullptr; BalanceTaskStatus status_ = BalanceTaskStatus::START; diff --git a/src/meta/processors/job/JobDescription.cpp b/src/meta/processors/job/JobDescription.cpp index 65862b4d7c2..d87a4c396e0 100644 --- a/src/meta/processors/job/JobDescription.cpp +++ b/src/meta/processors/job/JobDescription.cpp @@ -149,8 +149,8 @@ std::string JobDescription::archiveKey() { return str; } -bool JobDescription::setStatus(Status newStatus) { - if (JobStatus::laterThan(status_, newStatus)) { +bool JobDescription::setStatus(Status newStatus, bool force) { + if (JobStatus::laterThan(status_, newStatus) && !force) { return false; } status_ = newStatus; diff --git a/src/meta/processors/job/JobDescription.h 
b/src/meta/processors/job/JobDescription.h index 1270a32831a..5532a7949d6 100644 --- a/src/meta/processors/job/JobDescription.h +++ b/src/meta/processors/job/JobDescription.h @@ -91,7 +91,7 @@ class JobDescription { * will set start time if newStatus is running * will set stop time if newStatus is finished / failed / stopped * */ - bool setStatus(Status newStatus); + bool setStatus(Status newStatus, bool force = false); /* * get a existed job from kvstore, return folly::none if there isn't diff --git a/src/meta/processors/job/JobManager.cpp b/src/meta/processors/job/JobManager.cpp index 4628ffc1244..8d53afd4430 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -20,6 +20,7 @@ #include "meta/common/MetaCommon.h" #include "meta/processors/Common.h" #include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/BalancePlan.h" #include "meta/processors/job/JobStatus.h" #include "meta/processors/job/JobUtils.h" #include "meta/processors/job/TaskDescription.h" @@ -48,10 +49,13 @@ bool JobManager::init(nebula::kvstore::KVStore* store) { } kvStore_ = store; - lowPriorityQueue_ = std::make_unique>(); - highPriorityQueue_ = std::make_unique>(); + lowPriorityQueue_ = std::make_unique, true>>(); + highPriorityQueue_ = std::make_unique, true>>(); status_ = JbmgrStatus::IDLE; + if (handleRemainingJobs() != nebula::cpp2::ErrorCode::SUCCEEDED) { + return false; + } bgThread_ = std::thread(&JobManager::scheduleThread, this); LOG(INFO) << "JobManager initialized"; return true; @@ -59,6 +63,36 @@ bool JobManager::init(nebula::kvstore::KVStore* store) { JobManager::~JobManager() { shutDown(); } +nebula::cpp2::ErrorCode JobManager::handleRemainingJobs() { + std::unique_ptr iter; + auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode); + 
return retCode; + } + std::vector jds; + for (; iter->valid(); iter->next()) { + if (!JobDescription::isJobKey(iter->key())) { + continue; + } + auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); + if (nebula::ok(optJobRet)) { + auto optJob = nebula::value(optJobRet); + std::unique_ptr je = + MetaJobExecutorFactory::createMetaJobExecutor(optJob, kvStore_, adminClient_); + // Only balance has been recovered + if (optJob.getStatus() == cpp2::JobStatus::RUNNING && je->runInMeta()) { + jds.emplace_back(optJob); + } + } + } + for (auto& jd : jds) { + jd.setStatus(cpp2::JobStatus::QUEUE, true); + save(jd.jobKey(), jd.jobVal()); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + void JobManager::shutDown() { LOG(INFO) << "JobManager::shutDown() begin"; if (status_ == JbmgrStatus::STOPPED) { // in case of shutdown more than once @@ -76,8 +110,8 @@ void JobManager::shutDown() { void JobManager::scheduleThread() { LOG(INFO) << "JobManager::runJobBackground() enter"; while (status_ != JbmgrStatus::STOPPED) { - int32_t iJob = 0; - while (status_ == JbmgrStatus::BUSY || !try_dequeue(iJob)) { + std::pair opJobId; + while (status_ == JbmgrStatus::BUSY || !try_dequeue(opJobId)) { if (status_ == JbmgrStatus::STOPPED) { LOG(INFO) << "[JobManager] detect shutdown called, exit"; break; @@ -85,14 +119,14 @@ void JobManager::scheduleThread() { usleep(FLAGS_job_check_intervals); } - auto jobDescRet = JobDescription::loadJobDescription(iJob, kvStore_); + auto jobDescRet = JobDescription::loadJobDescription(opJobId.second, kvStore_); if (!nebula::ok(jobDescRet)) { - LOG(ERROR) << "[JobManager] load an invalid job from queue " << iJob; + LOG(ERROR) << "[JobManager] load an invalid job from queue " << opJobId.second; continue; // leader change or archive happend } auto jobDesc = nebula::value(jobDescRet); if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING)) { - LOG(INFO) << "[JobManager] skip job " << iJob; + LOG(INFO) << "[JobManager] skip job " << 
opJobId.second; continue; } save(jobDesc.jobKey(), jobDesc.jobVal()); @@ -102,16 +136,16 @@ void JobManager::scheduleThread() { status_ = JbmgrStatus::BUSY; } } - - if (!runJobInternal(jobDesc)) { - jobFinished(iJob, cpp2::JobStatus::FAILED); + if (!runJobInternal(jobDesc, opJobId.first)) { + jobFinished(opJobId.second, cpp2::JobStatus::FAILED); } } } // @return: true if all task dispatched, else false -bool JobManager::runJobInternal(const JobDescription& jobDesc) { - auto jobExec = MetaJobExecutorFactory::createMetaJobExecutor(jobDesc, kvStore_, adminClient_); +bool JobManager::runJobInternal(const JobDescription& jobDesc, JbOp op) { + std::unique_ptr jobExec = + MetaJobExecutorFactory::createMetaJobExecutor(jobDesc, kvStore_, adminClient_); if (jobExec == nullptr) { LOG(ERROR) << "unreconized job cmd " << apache::thrift::util::enumNameSafe(jobDesc.getCmd()); return false; @@ -131,7 +165,28 @@ bool JobManager::runJobInternal(const JobDescription& jobDesc) { LOG(ERROR) << "Job Executor prepare failed"; return false; } - + if (op == JbOp::RECOVER) { + jobExec->recovery(); + } + if (jobExec->runInMeta()) { + jobExec->setFinishCallBack([this, &jobDesc](bool ret) { + SCOPE_EXIT { cleanJob(jobDesc.getJobId()); }; + if (ret) { + JobDescription jd = jobDesc; + if (!jd.setStatus(cpp2::JobStatus::FINISHED)) { + return nebula::cpp2::ErrorCode::E_SAVE_JOB_FAILURE; + } + statusGuard_.lock(); + if (status_ == JbmgrStatus::BUSY) { + status_ = JbmgrStatus::IDLE; + } + statusGuard_.unlock(); + return save(jd.jobKey(), jd.jobVal()); + } else { + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + }); + } if (jobExec->execute() != nebula::cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Job dispatch failed"; return false; @@ -340,7 +395,7 @@ nebula::cpp2::ErrorCode JobManager::addJob(const JobDescription& jobDesc, AdminC auto rc = save(jobDesc.jobKey(), jobDesc.jobVal()); if (rc == nebula::cpp2::ErrorCode::SUCCEEDED) { auto jobId = jobDesc.getJobId(); - enqueue(jobId, jobDesc.getCmd()); + 
enqueue(JbOp::ADD, jobId, jobDesc.getCmd()); // Add job to jobMap inFlightJobs_.emplace(jobId, jobDesc); } else { @@ -358,20 +413,20 @@ size_t JobManager::jobSize() const { return highPriorityQueue_->size() + lowPriorityQueue_->size(); } -bool JobManager::try_dequeue(JobID& jobId) { - if (highPriorityQueue_->try_dequeue(jobId)) { +bool JobManager::try_dequeue(std::pair& opJobId) { + if (highPriorityQueue_->try_dequeue(opJobId)) { return true; - } else if (lowPriorityQueue_->try_dequeue(jobId)) { + } else if (lowPriorityQueue_->try_dequeue(opJobId)) { return true; } return false; } -void JobManager::enqueue(const JobID& jobId, const cpp2::AdminCmd& cmd) { +void JobManager::enqueue(const JbOp& op, const JobID& jobId, const cpp2::AdminCmd& cmd) { if (cmd == cpp2::AdminCmd::STATS) { - highPriorityQueue_->enqueue(jobId); + highPriorityQueue_->enqueue(std::make_pair(op, jobId)); } else { - lowPriorityQueue_->enqueue(jobId); + lowPriorityQueue_->enqueue(std::make_pair(op, jobId)); } } @@ -512,6 +567,21 @@ JobManager::showJob(JobID iJob, const std::string& spaceName) { ret.second.emplace_back(td.toTaskDesc()); } } + if (ret.first.get_cmd() == meta::cpp2::AdminCmd::DATA_BALANCE) { + auto res = BalancePlan::show(iJob, kvStore_, adminClient_); + if (ok(res)) { + std::vector thriftTasks = value(res); + auto& vec = ret.first.paras_ref<>().value(); + size_t index = vec.size(); + for (const auto& t : thriftTasks) { + std::string resVal; + apache::thrift::CompactSerializer::serialize(t, &resVal); + auto& val = ret.first.paras_ref<>().value(); + val.emplace_back(resVal); + } + vec.emplace_back(std::to_string(index)); + } + } return ret; } @@ -535,20 +605,43 @@ nebula::cpp2::ErrorCode JobManager::stopJob(JobID iJob, const std::string& space /* * Return: recovered job num. 
* */ -ErrorOr JobManager::recoverJob(const std::string& spaceName) { +ErrorOr JobManager::recoverJob( + const std::string& spaceName, AdminClient* client, const std::vector& jobIds) { int32_t recoveredJobNum = 0; - std::unique_ptr iter; - auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; + std::vector> kvs; + adminClient_ = client; + if (jobIds.empty()) { + std::unique_ptr iter; + auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + for (; iter->valid(); iter->next()) { + if (!JobDescription::isJobKey(iter->key())) { + continue; + } + kvs.emplace_back(std::make_pair(iter->key(), iter->val())); + } + } else { + std::vector keys; + keys.reserve(jobIds.size()); + for (int jobId : jobIds) { + keys.emplace_back(JobDescription::makeJobKey(jobId)); + } + std::vector values; + auto retCode = kvStore_->multiGet(kDefaultSpaceId, kDefaultPartId, keys, &values); + if (retCode.first != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode.first); + return retCode.first; + } + for (size_t i = 0; i < keys.size(); i++) { + kvs.emplace_back(std::make_pair(keys[i], values[i])); + } } - for (; iter->valid(); iter->next()) { - if (!JobDescription::isJobKey(iter->key())) { - continue; - } - auto optJobRet = JobDescription::makeJobDescription(iter->key(), iter->val()); + for (const std::pair& p : kvs) { + auto optJobRet = JobDescription::makeJobDescription(p.first, p.second); if (nebula::ok(optJobRet)) { auto optJob = nebula::value(optJobRet); if (optJob.getParas().back() != spaceName) { @@ -561,7 
+654,7 @@ ErrorOr JobManager::recoverJob(const std::str if (!jobExist) { auto jobId = optJob.getJobId(); - enqueue(jobId, optJob.getCmd()); + enqueue(JbOp::RECOVER, jobId, optJob.getCmd()); inFlightJobs_.emplace(jobId, optJob); ++recoveredJobNum; } diff --git a/src/meta/processors/job/JobManager.h b/src/meta/processors/job/JobManager.h index 383d1fe5d83..ab9b2280269 100644 --- a/src/meta/processors/job/JobManager.h +++ b/src/meta/processors/job/JobManager.h @@ -56,6 +56,11 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab STOPPED, }; + enum class JbOp { + ADD, + RECOVER, + }; + bool init(nebula::kvstore::KVStore* store); void shutDown(); @@ -79,7 +84,9 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab nebula::cpp2::ErrorCode stopJob(JobID iJob, const std::string& spaceName); // return error/recovered job num - ErrorOr recoverJob(const std::string& spaceName); + ErrorOr recoverJob(const std::string& spaceName, + AdminClient* client, + const std::vector& jobIds = {}); /** * @brief persist job executed result, and do the cleanup @@ -97,20 +104,22 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab // Tries to extract an element from the front of the highPriorityQueue_, // if faild, then extract an element from lowPriorityQueue_. // If the element is obtained, return true, otherwise return false. 
- bool try_dequeue(JobID& jobId); + bool try_dequeue(std::pair& opJobId); // Enter different priority queues according to the command type - void enqueue(const JobID& jobId, const cpp2::AdminCmd& cmd); + void enqueue(const JbOp& op, const JobID& jobId, const cpp2::AdminCmd& cmd); ErrorOr checkIndexJobRuning(); + nebula::cpp2::ErrorCode handleRemainingJobs(); + private: JobManager() = default; void scheduleThread(); void scheduleThreadOld(); - bool runJobInternal(const JobDescription& jobDesc); + bool runJobInternal(const JobDescription& jobDesc, JbOp op); bool runJobInternalOld(const JobDescription& jobDesc); ErrorOr getSpaceId(const std::string& name); @@ -133,12 +142,11 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab // Todo(pandasheep) // When folly is upgraded, PriorityUMPSCQueueSet can be used // Use two queues to simulate priority queue, Divide by job cmd - std::unique_ptr> lowPriorityQueue_; - std::unique_ptr> highPriorityQueue_; + std::unique_ptr, true>> lowPriorityQueue_; + std::unique_ptr, true>> highPriorityQueue_; // The job in running or queue folly::ConcurrentHashMap inFlightJobs_; - std::thread bgThread_; std::mutex statusGuard_; JbmgrStatus status_{JbmgrStatus::NOT_START}; diff --git a/src/meta/processors/job/MetaJobExecutor.cpp b/src/meta/processors/job/MetaJobExecutor.cpp index cf8ad4b3cca..f41e8d4eb2a 100644 --- a/src/meta/processors/job/MetaJobExecutor.cpp +++ b/src/meta/processors/job/MetaJobExecutor.cpp @@ -13,6 +13,7 @@ #include "meta/common/MetaCommon.h" #include "meta/processors/Common.h" #include "meta/processors/admin/AdminClient.h" +#include "meta/processors/job/BalanceJobExecutor.h" #include "meta/processors/job/CompactJobExecutor.h" #include "meta/processors/job/FlushJobExecutor.h" #include "meta/processors/job/RebuildEdgeJobExecutor.h" @@ -34,6 +35,12 @@ std::unique_ptr MetaJobExecutorFactory::createMetaJobExecutor( case cpp2::AdminCmd::COMPACT: ret.reset(new CompactJobExecutor(jd.getJobId(), store, 
client, jd.getParas())); break; + case cpp2::AdminCmd::DATA_BALANCE: + ret.reset(new DataBalanceJobExecutor(jd, store, client, jd.getParas())); + break; + case cpp2::AdminCmd::LEADER_BALANCE: + ret.reset(new LeaderBalanceJobExecutor(jd.getJobId(), store, client, jd.getParas())); + break; case cpp2::AdminCmd::FLUSH: ret.reset(new FlushJobExecutor(jd.getJobId(), store, client, jd.getParas())); break; @@ -179,6 +186,10 @@ nebula::cpp2::ErrorCode MetaJobExecutor::execute() { addressesRet = getListenerHost(space_, cpp2::ListenerType::ELASTICSEARCH); break; } + case TargetHosts::NONE: { + addressesRet = {{HostAddr(), {}}}; + break; + } case TargetHosts::DEFAULT: { addressesRet = getTargetHost(space_); break; @@ -194,20 +205,22 @@ nebula::cpp2::ErrorCode MetaJobExecutor::execute() { auto addresses = nebula::value(addressesRet); // write all tasks first. - for (auto i = 0U; i != addresses.size(); ++i) { - TaskDescription task(jobId_, i, addresses[i].first); - std::vector data{{task.taskKey(), task.taskVal()}}; - folly::Baton baton; - auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - kvstore_->asyncMultiPut( - kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { - rc = code; - baton.post(); - }); - baton.wait(); - if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(INFO) << "write to kv store failed, error: " << apache::thrift::util::enumNameSafe(rc); - return rc; + if (toHost_ != TargetHosts::NONE) { + for (auto i = 0U; i != addresses.size(); ++i) { + TaskDescription task(jobId_, i, addresses[i].first); + std::vector data{{task.taskKey(), task.taskVal()}}; + folly::Baton baton; + auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; + kvstore_->asyncMultiPut( + kDefaultSpaceId, kDefaultPartId, std::move(data), [&](nebula::cpp2::ErrorCode code) { + rc = code; + baton.post(); + }); + baton.wait(); + if (rc != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(INFO) << "write to kv store failed, error: " << apache::thrift::util::enumNameSafe(rc); + return 
rc; + } } } diff --git a/src/meta/processors/job/MetaJobExecutor.h b/src/meta/processors/job/MetaJobExecutor.h index c079f4514f1..bd10d0af110 100644 --- a/src/meta/processors/job/MetaJobExecutor.h +++ b/src/meta/processors/job/MetaJobExecutor.h @@ -21,13 +21,15 @@ using ErrOrHosts = ErrorOr>; class MetaJobExecutor { public: - enum class TargetHosts { LEADER = 0, LISTENER, DEFAULT }; + enum class TargetHosts { LEADER = 0, LISTENER, NONE, DEFAULT }; MetaJobExecutor(JobID jobId, kvstore::KVStore* kvstore, AdminClient* adminClient, const std::vector& paras) - : jobId_(jobId), kvstore_(kvstore), adminClient_(adminClient), paras_(paras) {} + : jobId_(jobId), kvstore_(kvstore), adminClient_(adminClient), paras_(paras) { + onFinished_ = [](bool) { return nebula::cpp2::ErrorCode::SUCCEEDED; }; + } virtual ~MetaJobExecutor() = default; @@ -54,6 +56,14 @@ class MetaJobExecutor { return nebula::cpp2::ErrorCode::SUCCEEDED; } + virtual bool runInMeta() { return false; } + + virtual nebula::cpp2::ErrorCode recovery() { return nebula::cpp2::ErrorCode::SUCCEEDED; } + + void setFinishCallBack(std::function func) { + onFinished_ = func; + } + protected: ErrorOr getSpaceIdFromName(const std::string& spaceName); @@ -75,9 +85,10 @@ class MetaJobExecutor { std::vector paras_; TargetHosts toHost_{TargetHosts::DEFAULT}; int32_t concurrency_{INT_MAX}; - bool stopped_{false}; + volatile bool stopped_{false}; std::mutex muInterrupt_; std::condition_variable condInterrupt_; + std::function onFinished_; }; class MetaJobExecutorFactory { diff --git a/src/meta/test/AdminClientTest.cpp b/src/meta/test/AdminClientTest.cpp index 1cb849be132..bb98cb22c96 100644 --- a/src/meta/test/AdminClientTest.cpp +++ b/src/meta/test/AdminClientTest.cpp @@ -11,7 +11,6 @@ #include "common/fs/TempDir.h" #include "common/utils/Utils.h" #include "interface/gen-cpp2/StorageAdminService.h" -#include "meta/processors/admin/Balancer.h" #include "meta/test/TestUtils.h" #define RETURN_OK(req) \ diff --git 
a/src/meta/test/BalanceIntegrationTest.cpp b/src/meta/test/BalanceIntegrationTest.cpp index cea54b8c2ef..eb7c4c8a6a4 100644 --- a/src/meta/test/BalanceIntegrationTest.cpp +++ b/src/meta/test/BalanceIntegrationTest.cpp @@ -9,7 +9,6 @@ #include "common/base/Base.h" #include "common/fs/TempDir.h" -#include "meta/processors/admin/Balancer.h" #include "meta/test/TestUtils.h" #include "storage/client/StorageClient.h" #include "storage/test/TestUtils.h" diff --git a/src/meta/test/BalancerTest.cpp b/src/meta/test/BalancerTest.cpp index cf9d973f1f4..b18cf0016a7 100644 --- a/src/meta/test/BalancerTest.cpp +++ b/src/meta/test/BalancerTest.cpp @@ -9,7 +9,7 @@ #include "common/base/Base.h" #include "common/fs/TempDir.h" -#include "meta/processors/admin/Balancer.h" +#include "meta/processors/job/BalanceJobExecutor.h" #include "meta/processors/parts/CreateSpaceProcessor.h" #include "meta/test/MockAdminClient.h" #include "meta/test/TestUtils.h" @@ -32,6 +32,7 @@ using ::testing::Return; using ::testing::SetArgPointee; using ::testing::StrictMock; +std::atomic testJobId = 1; TEST(BalanceTest, BalanceTaskTest) { fs::TempDir rootPath("/tmp/SimpleTest.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); @@ -55,7 +56,8 @@ TEST(BalanceTest, BalanceTaskTest) { EXPECT_CALL(client, removePart(0, 0, src)).Times(1); folly::Baton b; - BalanceTask task(0, 0, 0, src, dst, kv, &client); + BalanceTask task( + testJobId.fetch_add(1, std::memory_order_relaxed), 0, 0, src, dst, kv, &client); task.onFinished_ = [&]() { LOG(INFO) << "Task finished!"; EXPECT_EQ(BalanceTaskResult::SUCCEEDED, task.ret_); @@ -73,7 +75,8 @@ TEST(BalanceTest, BalanceTaskTest) { .WillOnce(Return(ByMove(folly::Future(Status::Error("Transfer failed"))))); folly::Baton b; - BalanceTask task(0, 0, 0, src, dst, kv, &client); + BalanceTask task( + testJobId.fetch_add(1, std::memory_order_relaxed), 0, 0, src, dst, kv, &client); task.onFinished_ = []() { LOG(FATAL) << "We should not reach here!"; }; task.onError_ = [&]() 
{ LOG(INFO) << "Error happens!"; @@ -131,6 +134,13 @@ HostParts assignHostParts(kvstore::KVStore* kv, GraphSpaceID spaceId) { return hostPart; } +void testRestBlancer() { + DataBalanceJobExecutor::plan_.reset(nullptr); + BalanceJobExecutor::lock_.unlock(); + BalanceJobExecutor::running_ = false; + LeaderBalanceJobExecutor::inLeaderBalance_ = false; +} + TEST(BalanceTest, SimpleTestWithZone) { fs::TempDir rootPath("/tmp/SimpleTestWithZone.XXXXXX"); auto store = MockCluster::initMetaKV(rootPath.path()); @@ -177,15 +187,18 @@ TEST(BalanceTest, SimpleTestWithZone) { int32_t totalParts = 12; std::vector tasks; NiceMock client; - Balancer balancer(kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); auto code = balancer.assembleZoneParts("group_0", hostParts); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, true); + balancer.balanceParts(0, hostParts, totalParts, tasks, true); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { EXPECT_EQ(3, it->second.size()); } EXPECT_EQ(3, tasks.size()); } + testRestBlancer(); } TEST(BalanceTest, ExpansionZoneTest) { @@ -225,9 +238,11 @@ TEST(BalanceTest, ExpansionZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_BALANCED, error(ret)); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); { std::vector hosts; for (int i = 0; i < 4; i++) { @@ -249,12 +264,13 @@ TEST(BalanceTest, ExpansionZoneTest) { ASSERT_TRUE(nebula::ok(result)); std::vector tasks; 
hostParts.emplace(HostAddr("3", 3), std::vector{}); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, true); + balancer.balanceParts(0, hostParts, totalParts, tasks, true); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { EXPECT_EQ(3, it->second.size()); } EXPECT_EQ(3, tasks.size()); } + testRestBlancer(); } TEST(BalanceTest, ExpansionHostIntoZoneTest) { @@ -294,9 +310,11 @@ TEST(BalanceTest, ExpansionHostIntoZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_BALANCED, error(ret)); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); { std::vector hosts; for (int i = 0; i < 6; i++) { @@ -321,12 +339,13 @@ TEST(BalanceTest, ExpansionHostIntoZoneTest) { hostParts.emplace(HostAddr("4", 4), std::vector{}); hostParts.emplace(HostAddr("5", 5), std::vector{}); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, true); + balancer.balanceParts(0, hostParts, totalParts, tasks, true); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { EXPECT_EQ(2, it->second.size()); } EXPECT_EQ(6, tasks.size()); } + testRestBlancer(); } TEST(BalanceTest, ShrinkZoneTest) { @@ -369,11 +388,15 @@ TEST(BalanceTest, ShrinkZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_BALANCED, error(ret)); - ret = balancer.balance({{"3", 3}}); - ASSERT_TRUE(ok(ret)); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = 
balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + balancer.lostHosts_ = {{"3", 3}}; + ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); } TEST(BalanceTest, ShrinkHostFromZoneTest) { @@ -415,9 +438,11 @@ TEST(BalanceTest, ShrinkHostFromZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_BALANCED, error(ret)); + JobDescription jd(0L, cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); showHostLoading(kv, 1); { @@ -426,8 +451,9 @@ TEST(BalanceTest, ShrinkHostFromZoneTest) { GroupInfo groupInfo = {{"default_group", {"zone_0", "zone_1", "zone_2"}}}; TestUtils::assembleGroupAndZone(kv, zoneInfo, groupInfo); } - ret = balancer.balance({{"3", 3}}); - ASSERT_TRUE(ok(ret)); + balancer.lostHosts_ = {{"3", 3}}; + ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); } TEST(BalanceTest, BalanceWithComplexZoneTest) { @@ -529,13 +555,14 @@ TEST(BalanceTest, BalanceWithComplexZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); { int32_t totalParts = 18 * 3; std::vector tasks; auto hostParts = assignHostParts(kv, 1); - balancer.balanceParts(0, 1, hostParts, totalParts, tasks, true); + balancer.balanceParts(1, hostParts, totalParts, tasks, true); } { int32_t totalParts = 64 * 3; @@ -543,7 +570,7 @@ TEST(BalanceTest, BalanceWithComplexZoneTest) { auto hostParts = assignHostParts(kv, 2); auto code = 
balancer.assembleZoneParts("group_0", hostParts); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(0, 2, hostParts, totalParts, tasks, true); + balancer.balanceParts(2, hostParts, totalParts, tasks, true); } { auto dump = [](const HostParts& hostParts, const std::vector& tasks) { @@ -580,7 +607,7 @@ TEST(BalanceTest, BalanceWithComplexZoneTest) { dump(hostParts, tasks); auto code = balancer.assembleZoneParts("group_1", hostParts); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); - balancer.balanceParts(0, 3, hostParts, totalParts, tasks, true); + balancer.balanceParts(3, hostParts, totalParts, tasks, true); LOG(INFO) << "=== new map ===="; dump(hostParts, tasks); @@ -626,8 +653,10 @@ TEST(BalanceTest, BalancePartsTest) { std::vector tasks; VLOG(1) << "=== original map ===="; dump(hostParts, tasks); - Balancer balancer(kv, &client); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, false); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.balanceParts(0, hostParts, totalParts, tasks, false); VLOG(1) << "=== new map ===="; dump(hostParts, tasks); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { @@ -645,8 +674,10 @@ TEST(BalanceTest, BalancePartsTest) { std::vector tasks; VLOG(1) << "=== original map ===="; dump(hostParts, tasks); - Balancer balancer(kv, &client); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, false); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.balanceParts(0, hostParts, totalParts, tasks, false); VLOG(1) << "=== new map ===="; dump(hostParts, tasks); EXPECT_EQ(4, hostParts[HostAddr("0", 0)].size()); @@ -665,8 +696,10 @@ TEST(BalanceTest, BalancePartsTest) { std::vector tasks; VLOG(1) << "=== original map ===="; dump(hostParts, 
tasks); - Balancer balancer(kv, &client); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, false); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.balanceParts(0, hostParts, totalParts, tasks, false); VLOG(1) << "=== new map ===="; dump(hostParts, tasks); EXPECT_EQ(4, hostParts[HostAddr("0", 0)].size()); @@ -690,8 +723,10 @@ TEST(BalanceTest, BalancePartsTest) { std::vector tasks; VLOG(1) << "=== original map ===="; dump(hostParts, tasks); - Balancer balancer(kv, &client); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, false); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.balanceParts(0, hostParts, totalParts, tasks, false); VLOG(1) << "=== new map ===="; dump(hostParts, tasks); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { @@ -713,8 +748,10 @@ TEST(BalanceTest, BalancePartsTest) { std::vector tasks; VLOG(1) << "=== original map ===="; dump(hostParts, tasks); - Balancer balancer(kv, &client); - balancer.balanceParts(0, 0, hostParts, totalParts, tasks, false); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + balancer.balanceParts(0, hostParts, totalParts, tasks, false); VLOG(1) << "=== new map ===="; dump(hostParts, tasks); for (auto it = hostParts.begin(); it != hostParts.end(); it++) { @@ -728,7 +765,9 @@ TEST(BalanceTest, BalancePartsTest) { TEST(BalanceTest, DispatchTasksTest) { { FLAGS_task_concurrency = 10; - BalancePlan plan(0L, nullptr, nullptr); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, nullptr, nullptr); for (int i = 0; i < 20; i++) { BalanceTask task(0, 0, 
@@ -747,7 +786,9 @@ TEST(BalanceTest, DispatchTasksTest) { } { FLAGS_task_concurrency = 10; - BalancePlan plan(0L, nullptr, nullptr); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, nullptr, nullptr); for (int i = 0; i < 5; i++) { BalanceTask task(0, 0, @@ -766,7 +807,9 @@ TEST(BalanceTest, DispatchTasksTest) { } { FLAGS_task_concurrency = 20; - BalancePlan plan(0L, nullptr, nullptr); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, nullptr, nullptr); for (int i = 0; i < 5; i++) { BalanceTask task(0, 0, @@ -814,17 +857,24 @@ TEST(BalanceTest, BalancePlanTest) { { LOG(INFO) << "Test with all tasks succeeded, only one bucket!"; NiceMock client; - BalancePlan plan(0L, kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, kv, &client); TestUtils::registerHB(kv, hosts); for (int i = 0; i < 10; i++) { - BalanceTask task( - 0, 0, 0, HostAddr(std::to_string(i), 0), HostAddr(std::to_string(i), 1), kv, &client); + BalanceTask task(plan.id(), + 0, + 0, + HostAddr(std::to_string(i), 0), + HostAddr(std::to_string(i), 1), + kv, + &client); plan.addTask(std::move(task)); } folly::Baton b; plan.onFinished_ = [&plan, &b]() { - ASSERT_EQ(BalanceStatus::SUCCEEDED, plan.status_); + ASSERT_EQ(meta::cpp2::JobStatus::FINISHED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); }; @@ -838,17 +888,24 @@ TEST(BalanceTest, BalancePlanTest) { { LOG(INFO) << "Test with all tasks succeeded, 10 buckets!"; NiceMock client; - BalancePlan plan(0L, kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, kv, &client); TestUtils::registerHB(kv, hosts); for (int i = 0; i < 10; i++) { - BalanceTask task( - 0, 0, i, HostAddr(std::to_string(i), 0), 
HostAddr(std::to_string(i), 1), kv, &client); + BalanceTask task(plan.id(), + 0, + i, + HostAddr(std::to_string(i), 0), + HostAddr(std::to_string(i), 1), + kv, + &client); plan.addTask(std::move(task)); } folly::Baton b; plan.onFinished_ = [&plan, &b]() { - ASSERT_EQ(BalanceStatus::SUCCEEDED, plan.status_); + ASSERT_EQ(meta::cpp2::JobStatus::FINISHED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); }; @@ -863,12 +920,19 @@ TEST(BalanceTest, BalancePlanTest) { } { LOG(INFO) << "Test with one task failed, 10 buckets"; - BalancePlan plan(0L, kv, nullptr); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + BalancePlan plan(jd, kv, nullptr); NiceMock client1, client2; { for (int i = 0; i < 9; i++) { - BalanceTask task( - 0, 0, i, HostAddr(std::to_string(i), 0), HostAddr(std::to_string(i), 1), kv, &client1); + BalanceTask task(plan.id(), + 0, + i, + HostAddr(std::to_string(i), 0), + HostAddr(std::to_string(i), 1), + kv, + &client1); plan.addTask(std::move(task)); } } @@ -876,13 +940,13 @@ TEST(BalanceTest, BalancePlanTest) { EXPECT_CALL(client2, transLeader(_, _, _, _)) .Times(1) .WillOnce(Return(ByMove(folly::Future(Status::Error("Transfer failed"))))); - BalanceTask task(0, 0, 9, HostAddr("9", 0), HostAddr("9", 1), kv, &client2); + BalanceTask task(plan.id(), 0, 9, HostAddr("9", 0), HostAddr("9", 1), kv, &client2); plan.addTask(std::move(task)); } TestUtils::registerHB(kv, hosts); folly::Baton b; plan.onFinished_ = [&plan, &b]() { - ASSERT_EQ(BalanceStatus::FAILED, plan.status_); + ASSERT_EQ(meta::cpp2::JobStatus::FAILED, plan.status()); ASSERT_EQ(10, plan.finishedTaskNum_); b.post(); }; @@ -891,37 +955,31 @@ TEST(BalanceTest, BalancePlanTest) { } } -int32_t verifyBalancePlan(kvstore::KVStore* kv, BalanceID balanceId, BalanceStatus balanceStatus) { - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retcode = kv->prefix(kDefaultSpaceId, kDefaultPartId, 
prefix, &iter); +void verifyBalancePlan(kvstore::KVStore* kv, JobID jobId, meta::cpp2::JobStatus jobStatus) { + std::string key = JobDescription::makeJobKey(jobId); + std::string value; + auto retcode = kv->get(kDefaultSpaceId, kDefaultPartId, key, &value); EXPECT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); - int32_t num = 0; - while (iter->valid()) { - auto id = MetaKeyUtils::parseBalanceID(iter->key()); - auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); - EXPECT_EQ(balanceId, id); - EXPECT_EQ(balanceStatus, status); - num++; - iter->next(); - } - return num; + auto optJobRet = JobDescription::makeJobDescription(key, value); + EXPECT_TRUE(nebula::ok(optJobRet)); + auto optJob = nebula::value(optJobRet); + EXPECT_EQ(jobStatus, optJob.getStatus()); } void verifyBalanceTask(kvstore::KVStore* kv, - BalanceID balanceId, + JobID jobId, BalanceTaskStatus status, BalanceTaskResult result, std::unordered_map& partCount, int32_t exceptNumber = 0) { - const auto& prefix = MetaKeyUtils::balanceTaskPrefix(balanceId); + const auto& prefix = MetaKeyUtils::balanceTaskPrefix(jobId); std::unique_ptr iter; auto code = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); int32_t num = 0; while (iter->valid()) { auto keyTuple = MetaKeyUtils::parseBalanceTaskKey(iter->key()); - ASSERT_EQ(balanceId, std::get<0>(keyTuple)); + ASSERT_EQ(jobId, std::get<0>(keyTuple)); ASSERT_EQ(1, std::get<1>(keyTuple)); partCount[std::get<3>(keyTuple)]--; partCount[std::get<4>(keyTuple)]++; @@ -950,22 +1008,21 @@ TEST(BalanceTest, NormalTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_BALANCED, error(ret)); - + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto 
ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); LOG(INFO) << "Now, we lost host " << HostAddr("3", 3); TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - ret = balancer.balance(); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); + ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); sleep(1); - LOG(INFO) << "Rebalance finished!"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); } TEST(BalanceTest, SpecifyHostTest) { @@ -980,19 +1037,20 @@ TEST(BalanceTest, SpecifyHostTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); sleep(1); LOG(INFO) << "Now, we remove host {3, 3}"; TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}}); - auto ret = balancer.balance({{"3", 3}}); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); - sleep(1); + balancer.lostHosts_ = {{"3", 3}}; + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); LOG(INFO) << "Rebalance finished!"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); } TEST(BalanceTest, SpecifyMultiHostTest) { @@ -1010,30 +1068,35 @@ TEST(BalanceTest, SpecifyMultiHostTest) { 
DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); LOG(INFO) << "Now, we want to remove host {2, 2}/{3, 3}"; // If {"2", 2} and {"3", 3} are both dead, minority hosts for some part are // alive, it would lead to a fail TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"4", 4}, {"5", 5}}); - auto ret = balancer.balance({{"2", 2}, {"3", 3}}); - ASSERT_FALSE(ok(ret)); - EXPECT_EQ(nebula::cpp2::ErrorCode::E_NO_VALID_HOST, error(ret)); + balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(apache::thrift::util::enumNameSafe(nebula::cpp2::ErrorCode::E_NO_VALID_HOST), + ret.value().message()); // If {"2", 2} is dead, {"3", 3} still alive, each part has majority hosts // alive + testRestBlancer(); TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"3", 3}, {"4", 4}, {"5", 5}}); - ret = balancer.balance({{"2", 2}, {"3", 3}}); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); + balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; + ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); LOG(INFO) << "Rebalance finished!"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); // In theory, there should be only 12 tasks, but in some environment, 13 tasks // is generated. A parition is moved more than once from A -> B -> C, actually // A -> C is enough. 
- verifyBalanceTask(kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount); + verifyBalanceTask( + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount); ASSERT_EQ(9, partCount[HostAddr("0", 0)]); ASSERT_EQ(9, partCount[HostAddr("1", 1)]); ASSERT_EQ(0, partCount[HostAddr("2", 2)]); @@ -1053,7 +1116,9 @@ TEST(BalanceTest, MockReplaceMachineTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); // add a new machine TestUtils::createSomeHosts(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}}); @@ -1062,16 +1127,14 @@ TEST(BalanceTest, MockReplaceMachineTest) { sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); // {2, 2} should be offline now TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"3", 3}}); - auto ret = balancer.balance(); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); LOG(INFO) << "Rebalance finished!"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); - std::unordered_map partCount; verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 12); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 12); } TEST(BalanceTest, SingleReplicaTest) { @@ -1089,20 +1152,23 @@ TEST(BalanceTest, SingleReplicaTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); sleep(1); LOG(INFO) << "Now, we 
want to remove host {2, 2} and {3, 3}"; TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}, {"3", 3}, {"4", 4}, {"5", 5}}); - auto ret = balancer.balance({{"2", 2}, {"3", 3}}); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); + + balancer.lostHosts_ = {{"2", 2}, {"3", 3}}; + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); LOG(INFO) << "Rebalance finished!"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 4); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 4); ASSERT_EQ(3, partCount[HostAddr("0", 0)]); ASSERT_EQ(3, partCount[HostAddr("1", 1)]); ASSERT_EQ(0, partCount[HostAddr("2", 2)]); @@ -1137,28 +1203,32 @@ TEST(BalanceTest, TryToRecoveryTest) { .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))) .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))); - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - CHECK(ok(ret)); - auto balanceId = value(ret); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); - - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::FAILED)); std::unordered_map partCount; - verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::CATCH_UP_DATA, BalanceTaskResult::FAILED, partCount, 6); + verifyBalanceTask(kv, + balancer.jobId_, + BalanceTaskStatus::CATCH_UP_DATA, + BalanceTaskResult::FAILED, + partCount, + 6); sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - LOG(INFO) << "Now let's try to recovery it. 
Sinc eall host would be regarded " + LOG(INFO) << "Now let's try to recovery it. Since all host would be regarded " "as offline, " << "so all task will be invalid"; - ret = balancer.balance(); - CHECK(ok(ret)); - balanceId = value(ret); + balancer.recovery(); + ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::START, BalanceTaskResult::INVALID, partCount, 6); + kv, balancer.jobId_, BalanceTaskStatus::START, BalanceTaskResult::INVALID, partCount, 6); } TEST(BalanceTest, RecoveryTest) { @@ -1187,28 +1257,30 @@ TEST(BalanceTest, RecoveryTest) { sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); LOG(INFO) << "Now, we lost host " << HostAddr("3", 3); TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - ASSERT_TRUE(ok(ret)); - auto balanceId = value(ret); + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); - - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::FAILED)); std::unordered_map partCount; - verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::CATCH_UP_DATA, BalanceTaskResult::FAILED, partCount, 6); + verifyBalanceTask(kv, + balancer.jobId_, + BalanceTaskStatus::CATCH_UP_DATA, + BalanceTaskResult::FAILED, + partCount, + 6); // register hb again to prevent from regarding src as offline TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); LOG(INFO) << "Now let's try to recovery it."; - ret = balancer.balance(); - ASSERT_TRUE(ok(ret)); - balanceId = value(ret); + balancer.recovery(); + ret = 
balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); sleep(1); - - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::SUCCEEDED)); verifyBalanceTask( - kv, balanceId, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); + kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount, 6); } TEST(BalanceTest, StopPlanTest) { @@ -1234,24 +1306,22 @@ TEST(BalanceTest, StopPlanTest) { .WillOnce( Return(ByMove(folly::makeFuture(Status::OK()).delayed(std::chrono::seconds(3))))); - Balancer balancer(kv, &delayClient); - auto ret = balancer.balance(); - CHECK(ok(ret)); - auto balanceId = value(ret); - + JobDescription jd( + testJobId.fetch_add(1, std::memory_order_relaxed), cpp2::AdminCmd::DATA_BALANCE, {}); + DataBalanceJobExecutor balancer(jd, kv, &delayClient, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); sleep(1); LOG(INFO) << "Rebalance should still in progress"; - ASSERT_EQ(1, verifyBalancePlan(kv, balanceId, BalanceStatus::IN_PROGRESS)); TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); auto stopRet = balancer.stop(); - CHECK(nebula::ok(stopRet)); - ASSERT_EQ(nebula::value(stopRet), balanceId); + EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, stopRet); // wait until the only IN_PROGRESS task finished; sleep(3); { - const auto& prefix = MetaKeyUtils::balanceTaskPrefix(balanceId); + const auto& prefix = MetaKeyUtils::balanceTaskPrefix(balancer.jobId_); std::unique_ptr iter; auto retcode = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); ASSERT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); @@ -1283,68 +1353,14 @@ TEST(BalanceTest, StopPlanTest) { TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); NiceMock normalClient; - balancer.client_ = &normalClient; - ret = balancer.balance(); - CHECK(ok(ret)); - ASSERT_NE(value(ret), balanceId); + balancer.adminClient_ = &normalClient; + testRestBlancer(); + ret = 
balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::OK(), ret.value()); + testRestBlancer(); sleep(1); } -TEST(BalanceTest, CleanLastInvalidBalancePlanTest) { - FLAGS_task_concurrency = 1; - fs::TempDir rootPath("/tmp/CleanLastInvalidBalancePlanTest.XXXXXX"); - auto store = MockCluster::initMetaKV(rootPath.path()); - auto* kv = dynamic_cast(store.get()); - FLAGS_heartbeat_interval_secs = 1; - TestUtils::createSomeHosts(kv); - TestUtils::assembleSpace(kv, 1, 8, 3, 4); - - DefaultValue>::SetFactory( - [] { return folly::Future(Status::OK()); }); - NiceMock client; - - // concurrency = 1, we could only block first task - EXPECT_CALL(client, waitingForCatchUpData(_, _, _)) - .Times(AtLeast(12)) - .WillOnce(Return(ByMove(folly::Future(Status::Error("catch up failed"))))); - - sleep(FLAGS_heartbeat_interval_secs * FLAGS_expired_time_factor + 1); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - Balancer balancer(kv, &client); - auto ret = balancer.balance(); - CHECK(ok(ret)); - auto balanceId = value(ret); - - // wait until the plan finished, no running plan for now, only one task has - // been executed, so the task will be failed, try to clean the invalid plan - sleep(3); - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - auto cleanRet = balancer.cleanLastInValidPlan(); - CHECK(ok(cleanRet)); - ASSERT_EQ(value(cleanRet), balanceId); - - { - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retcode = kv->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - ASSERT_EQ(retcode, nebula::cpp2::ErrorCode::SUCCEEDED); - int num = 0; - while (iter->valid()) { - num++; - iter->next(); - } - ASSERT_EQ(0, num); - } - - // start a new balance plan - TestUtils::registerHB(kv, {{"0", 0}, {"1", 1}, {"2", 2}}); - ret = balancer.balance(); - CHECK(ok(ret)); - ASSERT_NE(balanceId, value(ret)); - sleep(1); - ASSERT_EQ(1, verifyBalancePlan(kv, value(ret), BalanceStatus::SUCCEEDED)); -} - void 
verifyLeaderBalancePlan(HostLeaderMap& hostLeaderMap, const LeaderBalancePlan& plan, size_t minLoad, @@ -1382,7 +1398,8 @@ TEST(BalanceTest, SimpleLeaderBalancePlanTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); { HostLeaderMap hostLeaderMap; hostLeaderMap[HostAddr("0", 0)][1] = {1, 2, 3, 4, 5}; @@ -1459,7 +1476,8 @@ TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); { HostLeaderMap hostLeaderMap; @@ -1562,7 +1580,8 @@ TEST(BalanceTest, ManyHostsLeaderBalancePlanTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); // chcek several times if they are balanced for (int count = 0; count < 1; count++) { @@ -1604,9 +1623,10 @@ TEST(BalanceTest, LeaderBalanceTest) { EXPECT_CALL(client, getLeaderDist(_)) .WillOnce(DoAll(SetArgPointee<0>(dist), Return(ByMove(folly::Future(Status::OK()))))); - Balancer balancer(kv, &client); - auto ret = balancer.leaderBalance(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); + auto ret = balancer.executeInternal(HostAddr(), {}); + ASSERT_EQ(Status::Error("partiton failed to transfer leader"), ret.value()); } TEST(BalanceTest, LeaderBalanceWithZoneTest) { @@ -1649,8 +1669,8 @@ TEST(BalanceTest, LeaderBalanceWithZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer 
balancer(kv, &client); - + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); { HostLeaderMap hostLeaderMap; hostLeaderMap[HostAddr("0", 0)][1] = {1, 3, 5, 7}; @@ -1727,7 +1747,8 @@ TEST(BalanceTest, LeaderBalanceWithLargerZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); { HostLeaderMap hostLeaderMap; hostLeaderMap[HostAddr("0", 0)][1] = {1, 5, 8}; @@ -1844,7 +1865,8 @@ TEST(BalanceTest, LeaderBalanceWithComplexZoneTest) { DefaultValue>::SetFactory( [] { return folly::Future(Status::OK()); }); NiceMock client; - Balancer balancer(kv, &client); + LeaderBalanceJobExecutor balancer( + testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); { HostLeaderMap hostLeaderMap; diff --git a/src/meta/test/GetStatsTest.cpp b/src/meta/test/GetStatsTest.cpp index aa6da0dd3d2..e4631e8c442 100644 --- a/src/meta/test/GetStatsTest.cpp +++ b/src/meta/test/GetStatsTest.cpp @@ -161,7 +161,7 @@ TEST_F(GetStatsTest, StatsJob) { // Insert running status statis data in prepare function of runJobInternal. // Update statis data to finished or failed status in finish function of // runJobInternal. - auto result = jobMgr->runJobInternal(statisJob); + auto result = jobMgr->runJobInternal(statisJob, JobManager::JbOp::ADD); ASSERT_TRUE(result); // JobManager does not set the job finished status in RunJobInternal function. // But set statis data. @@ -295,7 +295,7 @@ TEST_F(GetStatsTest, StatsJob) { // Insert running status statis data in prepare function of runJobInternal. // Update statis data to finished or failed status in finish function of // runJobInternal. 
- auto result2 = jobMgr->runJobInternal(statisJob2); + auto result2 = jobMgr->runJobInternal(statisJob2, JobManager::JbOp::ADD); auto jobId2 = statisJob2.getJobId(); auto statsKey2 = MetaKeyUtils::statsKey(spaceId); diff --git a/src/meta/test/JobManagerTest.cpp b/src/meta/test/JobManagerTest.cpp index dc4f92c6594..855b8563cdc 100644 --- a/src/meta/test/JobManagerTest.cpp +++ b/src/meta/test/JobManagerTest.cpp @@ -57,9 +57,9 @@ class JobManagerTest : public ::testing::Test { void TearDown() override { auto cleanUnboundQueue = [](auto& q) { - int32_t jobId = 0; + std::pair pair; while (!q.empty()) { - q.dequeue(jobId); + q.dequeue(pair); } }; cleanUnboundQueue(*jobMgr->lowPriorityQueue_); @@ -89,7 +89,7 @@ TEST_F(JobManagerTest, AddRebuildTagIndexJob) { JobDescription job(11, cpp2::AdminCmd::REBUILD_TAG_INDEX, paras); auto rc = jobMgr->addJob(job, adminClient_.get()); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); - auto result = jobMgr->runJobInternal(job); + auto result = jobMgr->runJobInternal(job, JobManager::JbOp::ADD); ASSERT_TRUE(result); } @@ -101,7 +101,7 @@ TEST_F(JobManagerTest, AddRebuildEdgeIndexJob) { JobDescription job(11, cpp2::AdminCmd::REBUILD_EDGE_INDEX, paras); auto rc = jobMgr->addJob(job, adminClient_.get()); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); - auto result = jobMgr->runJobInternal(job); + auto result = jobMgr->runJobInternal(job, JobManager::JbOp::ADD); ASSERT_TRUE(result); } @@ -113,7 +113,7 @@ TEST_F(JobManagerTest, StatsJob) { JobDescription job(12, cpp2::AdminCmd::STATS, paras); auto rc = jobMgr->addJob(job, adminClient_.get()); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); - auto result = jobMgr->runJobInternal(job); + auto result = jobMgr->runJobInternal(job, JobManager::JbOp::ADD); ASSERT_TRUE(result); // Function runJobInternal does not set the finished status of the job job.setStatus(cpp2::JobStatus::FINISHED); @@ -144,18 +144,18 @@ TEST_F(JobManagerTest, JobPriority) { ASSERT_EQ(2, jobMgr->jobSize()); - JobID 
jobId = 0; - auto result = jobMgr->try_dequeue(jobId); + std::pair opJobId; + auto result = jobMgr->try_dequeue(opJobId); ASSERT_TRUE(result); - ASSERT_EQ(14, jobId); + ASSERT_EQ(14, opJobId.second); ASSERT_EQ(1, jobMgr->jobSize()); - result = jobMgr->try_dequeue(jobId); + result = jobMgr->try_dequeue(opJobId); ASSERT_TRUE(result); - ASSERT_EQ(13, jobId); + ASSERT_EQ(13, opJobId.second); ASSERT_EQ(0, jobMgr->jobSize()); - result = jobMgr->try_dequeue(jobId); + result = jobMgr->try_dequeue(opJobId); ASSERT_FALSE(result); jobMgr->status_ = JobManager::JbmgrStatus::IDLE; @@ -196,18 +196,18 @@ TEST_F(JobManagerTest, JobDeduplication) { } ASSERT_EQ(2, jobMgr->jobSize()); - JobID jobId = 0; - auto result = jobMgr->try_dequeue(jobId); + std::pair opJobId; + auto result = jobMgr->try_dequeue(opJobId); ASSERT_TRUE(result); - ASSERT_EQ(16, jobId); + ASSERT_EQ(16, opJobId.second); ASSERT_EQ(1, jobMgr->jobSize()); - result = jobMgr->try_dequeue(jobId); + result = jobMgr->try_dequeue(opJobId); ASSERT_TRUE(result); - ASSERT_EQ(15, jobId); + ASSERT_EQ(15, opJobId.second); ASSERT_EQ(0, jobMgr->jobSize()); - result = jobMgr->try_dequeue(jobId); + result = jobMgr->try_dequeue(opJobId); ASSERT_FALSE(result); jobMgr->status_ = JobManager::JbmgrStatus::IDLE; } @@ -396,7 +396,7 @@ TEST_F(JobManagerTest, recoverJob) { jobMgr->save(jd.jobKey(), jd.jobVal()); } - auto nJobRecovered = jobMgr->recoverJob(spaceName); + auto nJobRecovered = jobMgr->recoverJob(spaceName, nullptr); ASSERT_EQ(nebula::value(nJobRecovered), 1); } diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index 1e1dd237f65..e85079af095 100644 --- a/src/parser/AdminSentences.cpp +++ b/src/parser/AdminSentences.cpp @@ -152,36 +152,6 @@ std::string GetConfigSentence::toString() const { return std::string("GET CONFIGS ") + configItem_->toString(); } -std::string BalanceSentence::toString() const { - switch (subType_) { - case SubType::kUnknown: - return "Unknown"; - case SubType::kLeader: - return 
"BALANCE LEADER"; - case SubType::kData: { - if (hostDel_ == nullptr) { - return "BALANCE DATA"; - } else { - std::stringstream ss; - ss << "BALANCE DATA REMOVE "; - ss << hostDel_->toString(); - return ss.str(); - } - } - case SubType::kDataStop: - return "BALANCE DATA STOP"; - case SubType::kDataReset: - return "BALANCE DATA RESET PLAN"; - case SubType::kShowBalancePlan: { - std::stringstream ss; - ss << "BALANCE DATA " << balanceId_; - return ss.str(); - } - } - DLOG(FATAL) << "Type illegal"; - return "Unknown"; -} - std::string HostList::toString() const { std::string buf; buf.reserve(256); @@ -263,6 +233,18 @@ std::string AdminJobSentence::toString() const { case meta::cpp2::AdminCmd::INGEST: return "INGEST"; case meta::cpp2::AdminCmd::DATA_BALANCE: + if (paras_.empty()) { + return "SUBMIT JOB BALANCE DATA"; + } else { + std::string str = "SUBMIT JOB BALANCE DATA REMOVE"; + for (size_t i = 0; i < paras_.size(); i++) { + auto &s = paras_[i]; + str += i == 0 ? " " + s : ", " + s; + } + return str; + } + case meta::cpp2::AdminCmd::LEADER_BALANCE: + return "SUBMIT JOB BALANCE LEADER"; case meta::cpp2::AdminCmd::UNKNOWN: return folly::stringPrintf("Unsupported AdminCmd: %s", apache::thrift::util::enumNameSafe(cmd_).c_str()); @@ -277,7 +259,15 @@ std::string AdminJobSentence::toString() const { CHECK_EQ(paras_.size(), 1U); return folly::stringPrintf("STOP JOB %s", paras_[0].c_str()); case meta::cpp2::AdminJobOp::RECOVER: - return "RECOVER JOB"; + if (paras_.empty()) { + return "RECOVER JOB"; + } else { + std::string str = "RECOVER JOB"; + for (size_t i = 0; i < paras_.size(); i++) { + str += i == 0 ? 
" " + paras_[i] : ", " + paras_[i]; + } + return str; + } } LOG(FATAL) << "Unknown job operation " << static_cast(op_); } diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h index 71a3e994569..c774e724d1f 100644 --- a/src/parser/AdminSentences.h +++ b/src/parser/AdminSentences.h @@ -442,49 +442,6 @@ class GetConfigSentence final : public ConfigBaseSentence { std::string toString() const override; }; -class BalanceSentence final : public Sentence { - public: - enum class SubType : uint32_t { - kUnknown, - kLeader, - kData, - kDataStop, - kDataReset, - kShowBalancePlan, - }; - - // TODO: add more subtype for balance - explicit BalanceSentence(SubType subType) { - kind_ = Kind::kBalance; - subType_ = std::move(subType); - } - - explicit BalanceSentence(int64_t id) { - kind_ = Kind::kBalance; - subType_ = SubType::kShowBalancePlan; - balanceId_ = id; - } - - BalanceSentence(SubType subType, HostList* hostDel) { - kind_ = Kind::kBalance; - subType_ = std::move(subType); - hostDel_.reset(hostDel); - } - - std::string toString() const override; - - SubType subType() const { return subType_; } - - int64_t balanceId() const { return balanceId_; } - - HostList* hostDel() const { return hostDel_.get(); } - - private: - SubType subType_{SubType::kUnknown}; - int64_t balanceId_{0}; - std::unique_ptr hostDel_; -}; - class CreateSnapshotSentence final : public Sentence { public: CreateSnapshotSentence() { kind_ = Kind::kCreateSnapshot; } diff --git a/src/parser/Sentence.h b/src/parser/Sentence.h index 05552da6b89..f89801967ff 100644 --- a/src/parser/Sentence.h +++ b/src/parser/Sentence.h @@ -102,7 +102,6 @@ class Sentence { kGetConfig, kFetchVertices, kFetchEdges, - kBalance, kFindPath, kLimit, kGroupBy, diff --git a/src/parser/parser.yy b/src/parser/parser.yy index fce96a5aec6..94d3f20c15d 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -2985,6 +2985,36 @@ admin_job_sentence auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::RECOVER); 
$$ = sentence; } + | KW_RECOVER KW_JOB integer_list { + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::RECOVER); + std::vector*intVec=$3; + for(int32_t i = 0; isize(); i++){ + sentence->addPara(std::to_string(intVec->at(i))); + } + delete intVec; + $$ = sentence; + } + | KW_SUBMIT KW_JOB KW_BALANCE KW_LEADER { + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::LEADER_BALANCE); + $$ = sentence; + } + | KW_SUBMIT KW_JOB KW_BALANCE KW_DATA { + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::DATA_BALANCE); + $$ = sentence; + } + | KW_SUBMIT KW_JOB KW_BALANCE KW_DATA KW_REMOVE host_list { + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::DATA_BALANCE); + HostList* hl = $6; + std::vector has = hl->hosts(); + for (HostAddr& ha: has) { + sentence->addPara(ha.toString()); + } + delete hl; + $$ = sentence; + } ; job_concurrency @@ -3405,22 +3435,30 @@ integer_list balance_sentence : KW_BALANCE KW_LEADER { - $$ = new BalanceSentence(BalanceSentence::SubType::kLeader); + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::LEADER_BALANCE); + $$ = sentence; } | KW_BALANCE KW_DATA { - $$ = new BalanceSentence(BalanceSentence::SubType::kData); + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::DATA_BALANCE); + $$ = sentence; } | KW_BALANCE KW_DATA legal_integer { - $$ = new BalanceSentence($3); - } - | KW_BALANCE KW_DATA KW_STOP { - $$ = new BalanceSentence(BalanceSentence::SubType::kDataStop); - } - | KW_BALANCE KW_DATA KW_RESET KW_PLAN { - $$ = new BalanceSentence(BalanceSentence::SubType::kDataReset); + auto sentence = new AdminJobSentence(meta::cpp2::AdminJobOp::SHOW); + sentence->addPara(std::to_string($3)); + $$ = sentence; } | KW_BALANCE KW_DATA KW_REMOVE host_list { - $$ = new BalanceSentence(BalanceSentence::SubType::kData, $4); + auto sentence = new 
AdminJobSentence(meta::cpp2::AdminJobOp::ADD, + meta::cpp2::AdminCmd::DATA_BALANCE); + HostList* hl = $4; + std::vector has = hl->hosts(); + for (HostAddr& ha: has) { + sentence->addPara(ha.toString()); + } + delete hl; + $$ = sentence; } ; diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp index 29634fefabc..dedba7f9ad9 100644 --- a/src/parser/test/ParserTest.cpp +++ b/src/parser/test/ParserTest.cpp @@ -1957,11 +1957,6 @@ TEST_F(ParserTest, BalanceOperation) { auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } - { - std::string query = "BALANCE DATA STOP"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } { std::string query = "BALANCE DATA REMOVE 192.168.0.1:50000,192.168.0.1:50001"; auto result = parse(query); @@ -1972,11 +1967,6 @@ TEST_F(ParserTest, BalanceOperation) { auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } - { - std::string query = "BALANCE DATA RESET PLAN"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } } TEST_F(ParserTest, CrashByFuzzer) { @@ -2977,16 +2967,24 @@ TEST_F(ParserTest, JobTest) { checkTest("SUBMIT JOB FLUSH 111", "SUBMIT JOB FLUSH 111"); checkTest("SUBMIT JOB STATS", "SUBMIT JOB STATS"); checkTest("SUBMIT JOB STATS 111", "SUBMIT JOB STATS 111"); + checkTest("SUBMIT JOB BALANCE DATA", "SUBMIT JOB BALANCE DATA"); + checkTest( + "SUBMIT JOB BALANCE DATA REMOVE 192.168.0.1:50000, 192.168.0.1:50001, 192.168.0.1:50002", + "SUBMIT JOB BALANCE DATA REMOVE \"192.168.0.1\":50000, \"192.168.0.1\":50001, " + "\"192.168.0.1\":50002"); + checkTest("SUBMIT JOB BALANCE LEADER", "SUBMIT JOB BALANCE LEADER"); checkTest("SHOW JOBS", "SHOW JOBS"); checkTest("SHOW JOB 111", "SHOW JOB 111"); checkTest("STOP JOB 111", "STOP JOB 111"); checkTest("RECOVER JOB", "RECOVER JOB"); + checkTest("RECOVER JOB 111, 222, 333", "RECOVER JOB 111, 222, 333"); checkTest("REBUILD TAG INDEX name_index", "REBUILD TAG INDEX name_index"); 
checkTest("REBUILD EDGE INDEX name_index", "REBUILD EDGE INDEX name_index"); checkTest("REBUILD TAG INDEX", "REBUILD TAG INDEX "); checkTest("REBUILD EDGE INDEX", "REBUILD EDGE INDEX "); checkTest("REBUILD TAG INDEX name_index, age_index", "REBUILD TAG INDEX name_index,age_index"); checkTest("REBUILD EDGE INDEX name_index, age_index", "REBUILD EDGE INDEX name_index,age_index"); + checkTest("SUBMIT JOB COMPACT", "SUBMIT JOB COMPACT"); } TEST_F(ParserTest, ShowAndKillQueryTest) { diff --git a/src/webservice/test/CMakeLists.txt b/src/webservice/test/CMakeLists.txt index 92b16fe74f2..bfd9f50443e 100644 --- a/src/webservice/test/CMakeLists.txt +++ b/src/webservice/test/CMakeLists.txt @@ -15,8 +15,10 @@ nebula_add_test( $ $ $ + $ $ $ + $ LIBRARIES ${PROXYGEN_LIBRARIES} gtest @@ -36,8 +38,10 @@ nebula_add_test( $ $ $ + $ $ $ + $ LIBRARIES ${PROXYGEN_LIBRARIES} gtest @@ -57,8 +61,10 @@ nebula_add_test( $ $ $ + $ $ $ + $ LIBRARIES ${PROXYGEN_LIBRARIES} gtest @@ -74,11 +80,13 @@ nebula_add_test( $ $ $ + $ $ $ $ $ $ + $ LIBRARIES ${PROXYGEN_LIBRARIES} gtest From 5eda4b8ec771daafc81861a7ccf5c672440219b2 Mon Sep 17 00:00:00 2001 From: cpw <13495049+CPWstatic@users.noreply.github.com> Date: Wed, 17 Nov 2021 13:54:14 +0800 Subject: [PATCH 21/53] Traverse. (#3308) * Add traverse. * Implement buildResult. * Refactor expand. * Fix multi steps. * Add AppendVertices and fix range nullptr. * Fix override paths. * Impl AppendVertices and fix multi paths. * Using none_direct_dst and fix getting edge props. * Fix duplicate edges when multiple traverse. * Fix dedup and kind. * Fix dup edges. * Fix path release and reserve space by cnt. * Enhance stats. * Fix start from middle. * Fix scan nodes. * Support filter in traverse. * Fix node filter. * Fix yield path and edges. * Fix CollapseProjectRule test and fix empty result of traverse. * Fix traverse filter vertex. * Fix zero steps and some tests. * Fix path build. * Fix 1..1 and vertex props. * Fix 0 step path. 
* Fix start from some non-dedup vids. * Fix start from edge. * Fix match index select test. * Fix rel expr test. * Fix remove proj test. * Fix some test. * Remove unused code. * StepClause -> MatchStepRange. * Implement the explain and clone. * Rebase and fix compile and fix limit push down test. * Fix path build expr test. * Fix validator test. * Fix parser test. * Fix license. * Fix typo. * Rebase and fix. Co-authored-by: jimingquan --- src/common/datatypes/Edge.cpp | 10 + src/common/datatypes/Edge.h | 2 + src/common/expression/PathBuildExpression.cpp | 82 ++-- src/common/expression/PathBuildExpression.h | 4 +- .../expression/test/ExpressionContextMock.cpp | 4 +- src/common/function/FunctionManager.cpp | 43 +++ src/graph/context/Iterator.cpp | 103 +++-- src/graph/context/Iterator.h | 2 + src/graph/executor/CMakeLists.txt | 2 + src/graph/executor/Executor.cpp | 10 +- .../executor/query/AppendVerticesExecutor.cpp | 105 ++++++ .../executor/query/AppendVerticesExecutor.h | 33 ++ src/graph/executor/query/TraverseExecutor.cpp | 351 +++++++++++++++++ src/graph/executor/query/TraverseExecutor.h | 85 +++++ src/graph/planner/CMakeLists.txt | 1 - src/graph/planner/match/Expand.cpp | 274 -------------- src/graph/planner/match/Expand.h | 77 ---- src/graph/planner/match/LabelIndexSeek.cpp | 34 +- .../planner/match/MatchClausePlanner.cpp | 356 +++++++++--------- src/graph/planner/match/MatchClausePlanner.h | 15 +- src/graph/planner/match/MatchSolver.cpp | 2 +- src/graph/planner/match/PropIndexSeek.cpp | 34 +- src/graph/planner/match/VertexIdSeek.cpp | 36 +- src/graph/planner/match/VertexIdSeek.h | 4 +- src/graph/planner/ngql/PathPlanner.cpp | 2 +- src/graph/planner/ngql/SubgraphPlanner.cpp | 4 +- src/graph/planner/plan/PlanNode.cpp | 5 +- src/graph/planner/plan/PlanNode.h | 3 + src/graph/planner/plan/Query.cpp | 47 +++ src/graph/planner/plan/Query.h | 124 +++++- src/graph/util/SchemaUtil.cpp | 4 +- src/graph/util/SchemaUtil.h | 2 +- src/graph/validator/MatchValidator.cpp | 67 
++-- src/graph/validator/MatchValidator.h | 9 +- .../validator/test/MatchValidatorTest.cpp | 237 ++++-------- .../validator/test/QueryValidatorTest.cpp | 92 ++--- src/parser/MatchSentence.cpp | 6 +- src/parser/MatchSentence.h | 8 +- src/parser/parser.yy | 28 +- .../expression/RelationalExpr.feature | 15 +- .../tck/features/expression/UnaryExpr.feature | 15 +- tests/tck/features/match/With.feature | 21 +- .../optimizer/CollapseProjectRule.feature | 15 +- .../optimizer/CombineFilterRule.feature | 20 +- .../features/optimizer/IndexScanRule.feature | 85 ++--- .../MergeGetNbrsDedupProjectRule.feature | 2 + .../MergeGetVerticesDedupProjectRule.feature | 2 + .../PushLimitDownProjectRule.feature | 25 +- .../RemoveUselessProjectRule.feature | 32 +- 49 files changed, 1450 insertions(+), 1089 deletions(-) create mode 100644 src/graph/executor/query/AppendVerticesExecutor.cpp create mode 100644 src/graph/executor/query/AppendVerticesExecutor.h create mode 100644 src/graph/executor/query/TraverseExecutor.cpp create mode 100644 src/graph/executor/query/TraverseExecutor.h delete mode 100644 src/graph/planner/match/Expand.cpp delete mode 100644 src/graph/planner/match/Expand.h diff --git a/src/common/datatypes/Edge.cpp b/src/common/datatypes/Edge.cpp index ea11b5ec240..fad349cef7b 100644 --- a/src/common/datatypes/Edge.cpp +++ b/src/common/datatypes/Edge.cpp @@ -131,6 +131,16 @@ void Edge::clear() { props.clear(); } +bool Edge::keyEqual(const Edge& rhs) const { + if (type != rhs.type && type != -rhs.type) { + return false; + } + if (type == rhs.type) { + return src == rhs.src && dst == rhs.dst && ranking == rhs.ranking; + } + return src == rhs.dst && dst == rhs.src && ranking == rhs.ranking; +} + } // namespace nebula namespace std { diff --git a/src/common/datatypes/Edge.h b/src/common/datatypes/Edge.h index 65bd439855d..5fe10af251b 100644 --- a/src/common/datatypes/Edge.h +++ b/src/common/datatypes/Edge.h @@ -68,6 +68,8 @@ struct Edge { bool contains(const Value& key) const; 
const Value& value(const std::string& key) const; + + bool keyEqual(const Edge& rhs) const; }; inline std::ostream& operator<<(std::ostream& os, const Edge& v) { return os << v.toString(); } diff --git a/src/common/expression/PathBuildExpression.cpp b/src/common/expression/PathBuildExpression.cpp index fba2c5791e3..03a274b83b8 100644 --- a/src/common/expression/PathBuildExpression.cpp +++ b/src/common/expression/PathBuildExpression.cpp @@ -26,46 +26,60 @@ const Value& PathBuildExpression::eval(ExpressionContext& ctx) { for (size_t i = 1; i < items_.size(); ++i) { auto& value = items_[i]->eval(ctx); - if (value.isEdge()) { + if (!buildPath(value, path)) { + return Value::kNullBadData; + } + } + + result_ = path; + return result_; +} + +bool PathBuildExpression::buildPath(const Value& value, Path& path) const { + switch (value.type()) { + case Value::Type::EDGE: { + Step step; if (!path.steps.empty()) { - const auto& lastStep = path.steps.back(); - const auto& edge = value.getEdge(); - if (lastStep.dst.vid != edge.src) { - return Value::kNullBadData; - } + getEdge(value, path.steps.back().dst.vid, step); + } else { + getEdge(value, path.src.vid, step); } - Step step; - getEdge(value, step); path.steps.emplace_back(std::move(step)); - } else if (value.isVertex()) { + break; + } + case Value::Type::VERTEX: { if (path.steps.empty()) { - return Value::kNullBadData; + if (path.src.vid == value.getVertex().vid) { + return true; + } + return false; } auto& lastStep = path.steps.back(); - const auto& vert = value.getVertex(); - if (lastStep.dst.vid != vert.vid) { - return Value::kNullBadData; - } getVertex(value, lastStep.dst); - } else if (value.isPath()) { + break; + } + case Value::Type::PATH: { const auto& p = value.getPath(); if (!path.steps.empty()) { auto& lastStep = path.steps.back(); - if (lastStep.dst.vid != p.src.vid) { - return Value::kNullBadData; - } lastStep.dst = p.src; } path.steps.insert(path.steps.end(), p.steps.begin(), p.steps.end()); - } else { - if ((i 
& 1) == 1 || path.steps.empty() || !getVertex(value, path.steps.back().dst)) { - return Value::kNullBadData; + break; + } + case Value::Type::LIST: { + for (const auto& val : value.getList().values) { + if (!buildPath(val, path)) { + return false; + } } + break; + } + default: { + return false; } } - - result_ = path; - return result_; + return true; } bool PathBuildExpression::getVertex(const Value& value, Vertex& vertex) const { @@ -84,18 +98,26 @@ bool PathBuildExpression::getVertex(const Value& value, Vertex& vertex) const { return false; } -bool PathBuildExpression::getEdge(const Value& value, Step& step) const { - if (value.isEdge()) { - const auto& edge = value.getEdge(); +bool PathBuildExpression::getEdge(const Value& value, const Value& lastStepVid, Step& step) const { + if (!value.isEdge()) { + return false; + } + + const auto& edge = value.getEdge(); + if (lastStepVid == edge.src) { step.type = edge.type; step.name = edge.name; step.ranking = edge.ranking; step.props = edge.props; step.dst.vid = edge.dst; - return true; + } else { + step.type = -edge.type; + step.name = edge.name; + step.ranking = edge.ranking; + step.props = edge.props; + step.dst.vid = edge.src; } - - return false; + return true; } bool PathBuildExpression::operator==(const Expression& rhs) const { diff --git a/src/common/expression/PathBuildExpression.h b/src/common/expression/PathBuildExpression.h index f1f03e62726..aaea497db9d 100644 --- a/src/common/expression/PathBuildExpression.h +++ b/src/common/expression/PathBuildExpression.h @@ -56,7 +56,9 @@ class PathBuildExpression final : public Expression { bool getVertex(const Value& value, Vertex& vertex) const; - bool getEdge(const Value& value, Step& step) const; + bool getEdge(const Value& value, const Value& lastStepVid, Step& step) const; + + bool buildPath(const Value& value, Path& path) const; private: std::vector items_; diff --git a/src/common/expression/test/ExpressionContextMock.cpp 
b/src/common/expression/test/ExpressionContextMock.cpp index b0318536710..9a54ce323d1 100644 --- a/src/common/expression/test/ExpressionContextMock.cpp +++ b/src/common/expression/test/ExpressionContextMock.cpp @@ -36,9 +36,9 @@ std::unordered_map ExpressionContextMock::vals_ = { {"srcProperty", Value(13)}, {"dstProperty", Value(3)}, - {"path_src", Value("1")}, + {"path_src", Value(Vertex("1", {}))}, {"path_edge1", Value(Edge("1", "2", 1, "edge", 0, {}))}, - {"path_v1", Value("2")}, + {"path_v1", Value(Vertex("2", {}))}, {"path_edge2", Value(Edge("2", "3", 1, "edge", 0, {}))}, {"path_v2", Value(Vertex("3", {}))}, {"path_edge3", Value(Edge("3", "4", 1, "edge", 0, {}))}, diff --git a/src/common/function/FunctionManager.cpp b/src/common/function/FunctionManager.cpp index 2a38b6736f3..097c3eba278 100644 --- a/src/common/function/FunctionManager.cpp +++ b/src/common/function/FunctionManager.cpp @@ -413,6 +413,7 @@ std::unordered_map> FunctionManager::typ Value::Type::FLOAT}, Value::Type::LIST), }}, + {"is_edge", {TypeSignature({Value::Type::EDGE}, Value::Type::BOOL)}}, }; // static @@ -1945,6 +1946,41 @@ FunctionManager::FunctionManager() { } }; } + { + auto &attr = functions_["none_direct_dst"]; + attr.minArity_ = 1; + attr.maxArity_ = 1; + attr.isPure_ = true; + attr.body_ = [](const auto &args) -> Value { + switch (args[0].get().type()) { + case Value::Type::NULLVALUE: { + return Value::kNullValue; + } + case Value::Type::EDGE: { + const auto &edge = args[0].get().getEdge(); + return edge.dst; + } + case Value::Type::VERTEX: { + const auto &v = args[0].get().getVertex(); + return v.vid; + } + case Value::Type::LIST: { + const auto &listVal = args[0].get().getList(); + auto &lastVal = listVal.values.back(); + if (lastVal.isEdge()) { + return lastVal.getEdge().dst; + } else if (lastVal.isVertex()) { + return lastVal.getVertex().vid; + } else { + return Value::kNullBadType; + } + } + default: { + return Value::kNullBadType; + } + } + }; + } { auto &attr = 
functions_["rank"]; attr.minArity_ = 1; @@ -2636,6 +2672,13 @@ FunctionManager::FunctionManager() { return List(vals); }; } + { + auto &attr = functions_["is_edge"]; + attr.minArity_ = 1; + attr.maxArity_ = 1; + attr.isPure_ = true; + attr.body_ = [](const auto &args) -> Value { return args[0].get().isEdge(); }; + } } // NOLINT // static diff --git a/src/graph/context/Iterator.cpp b/src/graph/context/Iterator.cpp index fad0b67d64c..e14a4e6ede4 100644 --- a/src/graph/context/Iterator.cpp +++ b/src/graph/context/Iterator.cpp @@ -324,26 +324,54 @@ const Value& GetNeighborsIter::getTagProp(const std::string& tag, const std::str return Value::kNullValue; } - auto& tagPropIndices = currentDs_->tagPropsMap; - auto index = tagPropIndices.find(tag); - if (index == tagPropIndices.end()) { - return Value::kEmpty; - } - auto propIndex = index->second.propIndices.find(prop); - if (propIndex == index->second.propIndices.end()) { - return Value::kEmpty; - } - auto colId = index->second.colIdx; + size_t colId = 0; + size_t propId = 0; auto& row = *currentRow_; - DCHECK_GT(row.size(), colId); - if (row[colId].empty()) { + if (tag == "*") { + for (auto& index : currentDs_->tagPropsMap) { + auto propIndex = index.second.propIndices.find(prop); + if (propIndex != index.second.propIndices.end()) { + colId = index.second.colIdx; + propId = propIndex->second; + DCHECK_GT(row.size(), colId); + if (row[colId].empty()) { + continue; + } + if (!row[colId].isList()) { + return Value::kNullBadType; + } + auto& list = row[colId].getList(); + auto& val = list.values[propId]; + if (val.empty()) { + continue; + } else { + return val; + } + } + } return Value::kEmpty; + } else { + auto& tagPropIndices = currentDs_->tagPropsMap; + auto index = tagPropIndices.find(tag); + if (index == tagPropIndices.end()) { + return Value::kEmpty; + } + auto propIndex = index->second.propIndices.find(prop); + if (propIndex == index->second.propIndices.end()) { + return Value::kEmpty; + } + colId = 
index->second.colIdx; + propId = propIndex->second; + DCHECK_GT(row.size(), colId); + if (row[colId].empty()) { + return Value::kEmpty; + } + if (!row[colId].isList()) { + return Value::kNullBadType; + } + auto& list = row[colId].getList(); + return list.values[propId]; } - if (!row[colId].isList()) { - return Value::kNullBadType; - } - auto& list = row[colId].getList(); - return list.values[propIndex->second]; } const Value& GetNeighborsIter::getEdgeProp(const std::string& edge, const std::string& prop) const { @@ -688,20 +716,37 @@ const Value& PropIter::getProp(const std::string& name, const std::string& prop) return Value::kNullValue; } auto& propsMap = dsIndex_.propsMap; - auto index = propsMap.find(name); - if (index == propsMap.end()) { - return Value::kEmpty; - } - - auto propIndex = index->second.find(prop); - if (propIndex == index->second.end()) { - VLOG(1) << "No prop found : " << prop; + size_t colId = 0; + auto& row = *iter_; + if (name == "*") { + for (auto& index : propsMap) { + auto propIndex = index.second.find(prop); + if (propIndex == index.second.end()) { + continue; + } + colId = propIndex->second; + DCHECK_GT(row.size(), colId); + auto& val = row[colId]; + if (val.empty()) { + continue; + } else { + return val; + } + } return Value::kNullValue; + } else { + auto index = propsMap.find(name); + if (index == propsMap.end()) { + return Value::kEmpty; + } + auto propIndex = index->second.find(prop); + if (propIndex == index->second.end()) { + return Value::kNullValue; + } + colId = propIndex->second; + DCHECK_GT(row.size(), colId); + return row[colId]; } - auto colId = propIndex->second; - auto& row = *iter_; - DCHECK_GT(row.size(), colId); - return row[colId]; } Value PropIter::getVertex(const std::string& name) const { diff --git a/src/graph/context/Iterator.h b/src/graph/context/Iterator.h index ff0217d8596..562117e99b6 100644 --- a/src/graph/context/Iterator.h +++ b/src/graph/context/Iterator.h @@ -436,6 +436,8 @@ class SequentialIter : public 
Iterator { // Notice: We only use this interface when return results to client. friend class DataCollectExecutor; + friend class AppendVerticesExecutor; + friend class TraverseExecutor; Row&& moveRow() { return std::move(*iter_); } void doReset(size_t pos) override; diff --git a/src/graph/executor/CMakeLists.txt b/src/graph/executor/CMakeLists.txt index a1649d87ff6..261f9128113 100644 --- a/src/graph/executor/CMakeLists.txt +++ b/src/graph/executor/CMakeLists.txt @@ -34,6 +34,8 @@ nebula_add_library( query/InnerJoinExecutor.cpp query/IndexScanExecutor.cpp query/AssignExecutor.cpp + query/TraverseExecutor.cpp + query/AppendVerticesExecutor.cpp algo/ConjunctPathExecutor.cpp algo/BFSShortestPathExecutor.cpp algo/ProduceSemiShortestPathExecutor.cpp diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index db8178cea46..93115ba4fc8 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -65,6 +65,7 @@ #include "graph/executor/mutate/InsertExecutor.h" #include "graph/executor/mutate/UpdateExecutor.h" #include "graph/executor/query/AggregateExecutor.h" +#include "graph/executor/query/AppendVerticesExecutor.h" #include "graph/executor/query/AssignExecutor.h" #include "graph/executor/query/DataCollectExecutor.h" #include "graph/executor/query/DedupExecutor.h" @@ -82,6 +83,7 @@ #include "graph/executor/query/SampleExecutor.h" #include "graph/executor/query/SortExecutor.h" #include "graph/executor/query/TopNExecutor.h" +#include "graph/executor/query/TraverseExecutor.h" #include "graph/executor/query/UnionAllVersionVarExecutor.h" #include "graph/executor/query/UnionExecutor.h" #include "graph/executor/query/UnwindExecutor.h" @@ -507,6 +509,12 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kKillQuery: { return pool->add(new KillQueryExecutor(node, qctx)); } + case PlanNode::Kind::kTraverse: { + return pool->add(new TraverseExecutor(node, qctx)); + } + case 
PlanNode::Kind::kAppendVertices: { + return pool->add(new AppendVerticesExecutor(node, qctx)); + } case PlanNode::Kind::kUnknown: { LOG(FATAL) << "Unknown plan node kind " << static_cast(node->kind()); break; @@ -583,7 +591,7 @@ void Executor::drop() { // Make sure drop happened-after count decrement CHECK_EQ(inputVar->userCount.load(std::memory_order_acquire), 0); ectx_->dropResult(inputVar->name); - VLOG(1) << "Drop variable " << node()->outputVar(); + VLOG(1) << node()->kind() << " Drop variable " << inputVar->name; } } } diff --git a/src/graph/executor/query/AppendVerticesExecutor.cpp b/src/graph/executor/query/AppendVerticesExecutor.cpp new file mode 100644 index 00000000000..94f67efc581 --- /dev/null +++ b/src/graph/executor/query/AppendVerticesExecutor.cpp @@ -0,0 +1,105 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include "graph/executor/query/AppendVerticesExecutor.h" + +using nebula::storage::GraphStorageClient; +using nebula::storage::StorageRpcResponse; +using nebula::storage::cpp2::GetPropResponse; + +namespace nebula { +namespace graph { +folly::Future AppendVerticesExecutor::execute() { return appendVertices(); } + +DataSet AppendVerticesExecutor::buildRequestDataSet(const AppendVertices *av) { + if (av == nullptr) { + return nebula::DataSet({kVid}); + } + auto valueIter = ectx_->getResult(av->inputVar()).iter(); + return buildRequestDataSetByVidType(valueIter.get(), av->src(), av->dedup()); +} + +folly::Future AppendVerticesExecutor::appendVertices() { + SCOPED_TIMER(&execTime_); + + auto *av = asNode(node()); + GraphStorageClient *storageClient = qctx()->getStorageClient(); + + DataSet vertices = buildRequestDataSet(av); + if (vertices.rows.empty()) { + return finish(ResultBuilder().value(Value(DataSet(av->colNames()))).build()); + } + + GraphStorageClient::CommonRequestParam param(av->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + 
qctx()->plan()->isProfileEnabled()); + time::Duration getPropsTime; + return DCHECK_NOTNULL(storageClient) + ->getProps(param, + std::move(vertices), + av->props(), + nullptr, + av->exprs(), + av->dedup(), + av->orderBy(), + av->limit(), + av->filter()) + .via(runner()) + .ensure([this, getPropsTime]() { + SCOPED_TIMER(&execTime_); + otherStats_.emplace("total_rpc", folly::sformat("{}(us)", getPropsTime.elapsedInUSec())); + }) + .thenValue([this](StorageRpcResponse &&rpcResp) { + SCOPED_TIMER(&execTime_); + addStats(rpcResp, otherStats_); + return handleResp(std::move(rpcResp)); + }); +} + +Status AppendVerticesExecutor::handleResp( + storage::StorageRpcResponse &&rpcResp) { + auto result = handleCompleteness(rpcResp, FLAGS_accept_partial_success); + NG_RETURN_IF_ERROR(result); + auto state = std::move(result).value(); + std::unordered_map map; + auto *av = asNode(node()); + auto *vFilter = av->vFilter(); + QueryExpressionContext ctx(qctx()->ectx()); + for (auto &resp : rpcResp.responses()) { + if (resp.props_ref().has_value()) { + auto iter = PropIter(std::make_shared(std::move(*resp.props_ref()))); + for (; iter.valid(); iter.next()) { + if (vFilter != nullptr) { + auto &vFilterVal = vFilter->eval(ctx(&iter)); + if (!vFilterVal.isBool() || !vFilterVal.getBool()) { + continue; + } + } + map.emplace(iter.getColumn(kVid), iter.getVertex()); + } + } + } + + auto iter = qctx()->ectx()->getResult(av->inputVar()).iter(); + auto *src = av->src(); + + DataSet ds; + ds.colNames = av->colNames(); + ds.rows.reserve(iter->size()); + for (; iter->valid(); iter->next()) { + auto dstFound = map.find(src->eval(ctx(iter.get()))); + auto row = static_cast(iter.get())->moveRow(); + if (dstFound == map.end()) { + continue; + } + row.values.emplace_back(dstFound->second); + ds.rows.emplace_back(std::move(row)); + } + return finish(ResultBuilder().value(Value(std::move(ds))).state(state).build()); +} + +} // namespace graph +} // namespace nebula diff --git 
a/src/graph/executor/query/AppendVerticesExecutor.h b/src/graph/executor/query/AppendVerticesExecutor.h new file mode 100644 index 00000000000..109b7a08fc2 --- /dev/null +++ b/src/graph/executor/query/AppendVerticesExecutor.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#ifndef GRAPH_EXECUTOR_QUERY_APPENDVERTICESEXECUTOR_H_ +#define GRAPH_EXECUTOR_QUERY_APPENDVERTICESEXECUTOR_H_ + +#include "graph/executor/query/GetPropExecutor.h" +#include "graph/planner/plan/Query.h" + +namespace nebula { +namespace graph { + +class AppendVerticesExecutor final : public GetPropExecutor { + public: + AppendVerticesExecutor(const PlanNode *node, QueryContext *qctx) + : GetPropExecutor("AppendVerticesExecutor", node, qctx) {} + + folly::Future execute() override; + + private: + DataSet buildRequestDataSet(const AppendVertices *gv); + + folly::Future appendVertices(); + + Status handleResp(storage::StorageRpcResponse &&rpcResp); +}; + +} // namespace graph +} // namespace nebula + +#endif // GRAPH_EXECUTOR_QUERY_GETVERTICESEXECUTOR_H_ diff --git a/src/graph/executor/query/TraverseExecutor.cpp b/src/graph/executor/query/TraverseExecutor.cpp new file mode 100644 index 00000000000..d2e23e2d458 --- /dev/null +++ b/src/graph/executor/query/TraverseExecutor.cpp @@ -0,0 +1,351 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "graph/executor/query/TraverseExecutor.h" + +#include + +#include "clients/storage/GraphStorageClient.h" +#include "common/datatypes/List.h" +#include "common/datatypes/Vertex.h" +#include "common/time/ScopedTimer.h" +#include "graph/context/QueryContext.h" +#include "graph/service/GraphFlags.h" +#include "graph/util/SchemaUtil.h" + +using nebula::storage::GraphStorageClient; +using nebula::storage::StorageRpcResponse; +using nebula::storage::cpp2::GetNeighborsResponse; + +namespace nebula { +namespace graph { + +folly::Future TraverseExecutor::execute() { + range_ = traverse_->stepRange(); + auto status = buildRequestDataSet(); + if (!status.ok()) { + return error(std::move(status)); + } + return traverse(); +} + +Status TraverseExecutor::close() { + // clear the members + reqDs_.rows.clear(); + return Executor::close(); +} + +Status TraverseExecutor::buildRequestDataSet() { + SCOPED_TIMER(&execTime_); + auto inputVar = traverse_->inputVar(); + auto& inputResult = ectx_->getResult(inputVar); + auto inputIter = inputResult.iter(); + auto iter = static_cast(inputIter.get()); + + reqDs_.colNames = {kVid}; + reqDs_.rows.reserve(iter->size()); + + std::unordered_set uniqueSet; + uniqueSet.reserve(iter->size()); + std::unordered_map prev; + const auto& spaceInfo = qctx()->rctx()->session()->space(); + const auto& vidType = *(spaceInfo.spaceDesc.vid_type_ref()); + auto* src = traverse_->src(); + QueryExpressionContext ctx(ectx_); + + for (; iter->valid(); iter->next()) { + auto vid = src->eval(ctx(iter)); + if (!SchemaUtil::isValidVid(vid, vidType)) { + LOG(WARNING) << "Mismatched vid type: " << vid.type() + << ", space vid type: " << SchemaUtil::typeToString(vidType); + continue; + } + buildPath(prev, vid, iter->moveRow()); + if (!uniqueSet.emplace(vid).second) { + continue; + } + reqDs_.emplace_back(Row({std::move(vid)})); + } + paths_.emplace_back(std::move(prev)); + return Status::OK(); +} + +folly::Future TraverseExecutor::traverse() { + if 
(reqDs_.rows.empty()) { + VLOG(1) << "Empty input."; + DataSet emptyResult; + return finish(ResultBuilder().value(Value(std::move(emptyResult))).build()); + } + getNeighbors(); + return promise_.getFuture(); +} + +void TraverseExecutor::getNeighbors() { + currentStep_++; + time::Duration getNbrTime; + GraphStorageClient* storageClient = qctx_->getStorageClient(); + bool finalStep = isFinalStep(); + GraphStorageClient::CommonRequestParam param(traverse_->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); + storageClient + ->getNeighbors(param, + reqDs_.colNames, + std::move(reqDs_.rows), + traverse_->edgeTypes(), + traverse_->edgeDirection(), + finalStep ? traverse_->statProps() : nullptr, + traverse_->vertexProps(), + traverse_->edgeProps(), + finalStep ? traverse_->exprs() : nullptr, + finalStep ? traverse_->dedup() : false, + finalStep ? traverse_->random() : false, + finalStep ? traverse_->orderBy() : std::vector(), + finalStep ? traverse_->limit() : -1, + finalStep ? 
traverse_->filter() : nullptr) + .via(runner()) + .thenValue([this, getNbrTime](StorageRpcResponse&& resp) mutable { + SCOPED_TIMER(&execTime_); + addStats(resp, getNbrTime.elapsedInUSec()); + handleResponse(resp); + }); +} + +void TraverseExecutor::addStats(RpcResponse& resp, int64_t getNbrTimeInUSec) { + auto& hostLatency = resp.hostLatency(); + std::stringstream ss; + ss << "{\n"; + for (size_t i = 0; i < hostLatency.size(); ++i) { + size_t size = 0u; + auto& result = resp.responses()[i]; + if (result.vertices_ref().has_value()) { + size = (*result.vertices_ref()).size(); + } + auto& info = hostLatency[i]; + ss << "{" << folly::sformat("{} exec/total/vertices: ", std::get<0>(info).toString()) + << folly::sformat("{}(us)/{}(us)/{},", std::get<1>(info), std::get<2>(info), size) << "\n" + << folly::sformat("total_rpc_time: {}(us)", getNbrTimeInUSec) << "\n"; + auto detail = getStorageDetail(result.result.latency_detail_us_ref()); + if (!detail.empty()) { + ss << folly::sformat("storage_detail: {}", detail); + } + ss << "\n}"; + } + ss << "\n}"; + otherStats_.emplace(folly::sformat("step {}", currentStep_), ss.str()); +} + +void TraverseExecutor::handleResponse(RpcResponse& resps) { + SCOPED_TIMER(&execTime_); + auto result = handleCompleteness(resps, FLAGS_accept_partial_success); + if (!result.ok()) { + promise_.setValue(std::move(result).status()); + } + + auto& responses = resps.responses(); + List list; + for (auto& resp : responses) { + auto dataset = resp.get_vertices(); + if (dataset == nullptr) { + LOG(INFO) << "Empty dataset in response"; + continue; + } + list.values.emplace_back(std::move(*dataset)); + } + auto listVal = std::make_shared(std::move(list)); + auto iter = std::make_unique(listVal); + + auto status = buildInterimPath(iter.get()); + if (!status.ok()) { + promise_.setValue(status); + return; + } + if (!isFinalStep()) { + if (reqDs_.rows.empty()) { + if (range_ != nullptr) { + promise_.setValue(buildResult()); + } else { + 
promise_.setValue(Status::OK()); + } + } else { + getNeighbors(); + } + } else { + promise_.setValue(buildResult()); + } +} + +Status TraverseExecutor::buildInterimPath(GetNeighborsIter* iter) { + const auto& spaceInfo = qctx()->rctx()->session()->space(); + DataSet reqDs; + reqDs.colNames = reqDs_.colNames; + size_t count = 0; + + const std::unordered_map& prev = paths_.back(); + if (currentStep_ == 1 && zeroStep()) { + paths_.emplace_back(); + NG_RETURN_IF_ERROR(handleZeroStep(prev, iter->getVertices(), paths_.back(), count)); + // If 0..0 case, release memory and return immediately. + if (range_ != nullptr && range_->max() == 0) { + releasePrevPaths(count); + return Status::OK(); + } + } + paths_.emplace_back(); + std::unordered_map& current = paths_.back(); + + auto* vFilter = traverse_->vFilter(); + auto* eFilter = traverse_->eFilter(); + QueryExpressionContext ctx(ectx_); + std::unordered_set uniqueDst; + + for (; iter->valid(); iter->next()) { + auto& dst = iter->getEdgeProp("*", kDst); + if (!SchemaUtil::isValidVid(dst, *(spaceInfo.spaceDesc.vid_type_ref()))) { + continue; + } + if (vFilter != nullptr && currentStep_ == 1) { + auto& vFilterVal = vFilter->eval(ctx(iter)); + if (!vFilterVal.isBool() || !vFilterVal.getBool()) { + continue; + } + } + if (eFilter != nullptr) { + auto& eFilterVal = eFilter->eval(ctx(iter)); + if (!eFilterVal.isBool() || !eFilterVal.getBool()) { + continue; + } + } + auto srcV = iter->getVertex(); + auto e = iter->getEdge(); + // Join on dst = src + auto pathToSrcFound = prev.find(srcV.getVertex().vid); + if (pathToSrcFound == prev.end()) { + return Status::Error("Can't find prev paths."); + } + const auto& paths = pathToSrcFound->second; + for (auto& prevPath : paths) { + if (hasSameEdge(prevPath, e.getEdge())) { + continue; + } + if (uniqueDst.emplace(dst).second) { + reqDs.rows.emplace_back(Row({std::move(dst)})); + } + auto path = prevPath; + if (currentStep_ == 1) { + path.values.emplace_back(srcV); + List neighbors; + 
neighbors.values.emplace_back(e); + path.values.emplace_back(std::move(neighbors)); + buildPath(current, dst, std::move(path)); + ++count; + } else { + auto& eList = path.values.back().mutableList().values; + eList.emplace_back(srcV); + eList.emplace_back(e); + buildPath(current, dst, std::move(path)); + ++count; + } + } // `prevPath' + } // `iter' + + releasePrevPaths(count); + reqDs_ = std::move(reqDs); + return Status::OK(); +} + +void TraverseExecutor::buildPath(std::unordered_map>& currentPaths, + const Value& dst, + Row&& path) { + auto pathToDstFound = currentPaths.find(dst); + if (pathToDstFound == currentPaths.end()) { + Paths interimPaths; + interimPaths.emplace_back(std::move(path)); + currentPaths.emplace(dst, std::move(interimPaths)); + } else { + auto& interimPaths = pathToDstFound->second; + interimPaths.emplace_back(std::move(path)); + } +} + +Status TraverseExecutor::buildResult() { + // This means we are reaching a dead end, return empty. + if (range_ != nullptr && currentStep_ < range_->min()) { + return finish(ResultBuilder().value(Value(DataSet())).build()); + } + + DataSet result; + result.colNames = traverse_->colNames(); + result.rows.reserve(cnt_); + for (auto& currentStepPaths : paths_) { + for (auto& paths : currentStepPaths) { + std::move(paths.second.begin(), paths.second.end(), std::back_inserter(result.rows)); + } + } + + return finish(ResultBuilder().value(Value(std::move(result))).build()); +} + +bool TraverseExecutor::hasSameEdge(const Row& prevPath, const Edge& currentEdge) { + for (const auto& v : prevPath.values) { + if (v.isList()) { + for (const auto& e : v.getList().values) { + if (e.isEdge() && e.getEdge().keyEqual(currentEdge)) { + return true; + } + } + } + } + return false; +} + +void TraverseExecutor::releasePrevPaths(size_t cnt) { + if (range_ != nullptr) { + if (currentStep_ == range_->min() && paths_.size() > 1) { + auto rangeEnd = paths_.begin(); + std::advance(rangeEnd, paths_.size() - 1); + 
paths_.erase(paths_.begin(), rangeEnd); + } else if (range_->min() == 0 && currentStep_ == 1 && paths_.size() > 1) { + paths_.pop_front(); + } + + if (currentStep_ >= range_->min()) { + cnt_ += cnt; + } + } else { + paths_.pop_front(); + cnt_ = cnt; + } +} + +Status TraverseExecutor::handleZeroStep(const std::unordered_map& prev, + List&& vertices, + std::unordered_map& zeroSteps, + size_t& count) { + std::unordered_set uniqueSrc; + for (auto& srcV : vertices.values) { + auto src = srcV.getVertex().vid; + if (!uniqueSrc.emplace(src).second) { + continue; + } + auto pathToSrcFound = prev.find(src); + if (pathToSrcFound == prev.end()) { + return Status::Error("Can't find prev paths."); + } + const auto& paths = pathToSrcFound->second; + for (auto path : paths) { + path.values.emplace_back(srcV); + List neighbors; + neighbors.values.emplace_back(srcV); + path.values.emplace_back(std::move(neighbors)); + buildPath(zeroSteps, src, std::move(path)); + ++count; + } + } + return Status::OK(); +} +} // namespace graph +} // namespace nebula diff --git a/src/graph/executor/query/TraverseExecutor.h b/src/graph/executor/query/TraverseExecutor.h new file mode 100644 index 00000000000..4f2802cb68d --- /dev/null +++ b/src/graph/executor/query/TraverseExecutor.h @@ -0,0 +1,85 @@ +/* Copyright (c) 2021 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef EXECUTOR_QUERY_TRAVERSEEXECUTOR_H_ +#define EXECUTOR_QUERY_TRAVERSEEXECUTOR_H_ + +#include + +#include "clients/storage/GraphStorageClient.h" +#include "common/base/StatusOr.h" +#include "common/datatypes/Value.h" +#include "common/datatypes/Vertex.h" +#include "graph/executor/StorageAccessExecutor.h" +#include "graph/planner/plan/Query.h" +#include "interface/gen-cpp2/storage_types.h" + +namespace nebula { +namespace graph { + +using RpcResponse = storage::StorageRpcResponse; + +class TraverseExecutor final : public StorageAccessExecutor { + public: + TraverseExecutor(const PlanNode* node, QueryContext* qctx) + : StorageAccessExecutor("Traverse", node, qctx) { + traverse_ = asNode(node); + } + + folly::Future execute() override; + + Status close() override; + + private: + using Dst = Value; + using Paths = std::vector; + Status buildRequestDataSet(); + + folly::Future traverse(); + + void addStats(RpcResponse& resps, int64_t getNbrTimeInUSec); + + void getNeighbors(); + + void handleResponse(RpcResponse& resps); + + Status buildInterimPath(GetNeighborsIter* iter); + + Status buildResult(); + + bool isFinalStep() const { + return (range_ == nullptr && currentStep_ == 1) || + (range_ != nullptr && (currentStep_ == range_->max() || range_->max() == 0)); + } + + bool zeroStep() const { return range_ != nullptr && range_->min() == 0; } + + bool hasSameEdge(const Row& prevPath, const Edge& currentEdge); + + void releasePrevPaths(size_t cnt); + + void buildPath(std::unordered_map>& currentPaths, + const Value& dst, + Row&& path); + + Status handleZeroStep(const std::unordered_map& prev, + List&& vertices, + std::unordered_map& zeroSteps, + size_t& count); + + private: + DataSet reqDs_; + const Traverse* traverse_{nullptr}; + folly::Promise promise_; + MatchStepRange* range_{nullptr}; + size_t currentStep_{0}; + std::list> paths_; + size_t cnt_{0}; +}; + +} // namespace graph +} // namespace nebula + +#endif // EXECUTOR_QUERY_TRAVERSEEXECUTOR_H_ diff --git 
a/src/graph/planner/CMakeLists.txt b/src/graph/planner/CMakeLists.txt index b79f97edefd..f5e0eab5d70 100644 --- a/src/graph/planner/CMakeLists.txt +++ b/src/graph/planner/CMakeLists.txt @@ -25,7 +25,6 @@ nebula_add_library( match/StartVidFinder.cpp match/PropIndexSeek.cpp match/VertexIdSeek.cpp - match/Expand.cpp match/LabelIndexSeek.cpp plan/PlanNode.cpp plan/ExecutionPlan.cpp diff --git a/src/graph/planner/match/Expand.cpp b/src/graph/planner/match/Expand.cpp deleted file mode 100644 index 6a546c8f145..00000000000 --- a/src/graph/planner/match/Expand.cpp +++ /dev/null @@ -1,274 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/planner/match/Expand.h" - -#include "graph/planner/match/MatchSolver.h" -#include "graph/planner/match/SegmentsConnector.h" -#include "graph/planner/plan/Logic.h" -#include "graph/planner/plan/Query.h" -#include "graph/util/AnonColGenerator.h" -#include "graph/util/ExpressionUtils.h" -#include "graph/visitor/RewriteVisitor.h" - -using nebula::storage::cpp2::EdgeProp; -using nebula::storage::cpp2::VertexProp; -using PNKind = nebula::graph::PlanNode::Kind; - -namespace nebula { -namespace graph { - -static std::unique_ptr> genVertexProps() { - return std::make_unique>(); -} - -std::unique_ptr> Expand::genEdgeProps(const EdgeInfo& edge) { - auto edgeProps = std::make_unique>(); - for (auto edgeType : edge.edgeTypes) { - auto edgeSchema = matchCtx_->qctx->schemaMng()->getEdgeSchema(matchCtx_->space.id, edgeType); - - switch (edge.direction) { - case Direction::OUT_EDGE: { - if (reversely_) { - edgeType = -edgeType; - } - break; - } - case Direction::IN_EDGE: { - if (!reversely_) { - edgeType = -edgeType; - } - break; - } - case Direction::BOTH: { - EdgeProp edgeProp; - edgeProp.set_type(-edgeType); - std::vector props{kSrc, kType, kRank, kDst}; - for (std::size_t i = 0; i < edgeSchema->getNumFields(); ++i) { - 
props.emplace_back(edgeSchema->getFieldName(i)); - } - edgeProp.set_props(std::move(props)); - edgeProps->emplace_back(std::move(edgeProp)); - break; - } - } - EdgeProp edgeProp; - edgeProp.set_type(edgeType); - std::vector props{kSrc, kType, kRank, kDst}; - for (std::size_t i = 0; i < edgeSchema->getNumFields(); ++i) { - props.emplace_back(edgeSchema->getFieldName(i)); - } - edgeProp.set_props(std::move(props)); - edgeProps->emplace_back(std::move(edgeProp)); - } - return edgeProps; -} - -static Expression* mergePathColumnsExpr(ObjectPool* pool, - const std::string& lcol, - const std::string& rcol) { - auto expr = PathBuildExpression::make(pool); - expr->add(InputPropertyExpression::make(pool, lcol)); - expr->add(InputPropertyExpression::make(pool, rcol)); - return expr; -} - -static Expression* buildPathExpr(ObjectPool* pool) { - auto expr = PathBuildExpression::make(pool); - expr->add(VertexExpression::make(pool)); - expr->add(EdgeExpression::make(pool)); - return expr; -} - -Status Expand::doExpand(const NodeInfo& node, const EdgeInfo& edge, SubPlan* plan) { - NG_RETURN_IF_ERROR(expandSteps(node, edge, plan)); - NG_RETURN_IF_ERROR(filterDatasetByPathLength(edge, plan->root, plan)); - return Status::OK(); -} - -// Build subplan: Project->Dedup->GetNeighbors->[Filter]->Project2-> -// DataJoin->Project3->[Filter]->Passthrough->Loop->UnionAllVer -Status Expand::expandSteps(const NodeInfo& node, const EdgeInfo& edge, SubPlan* plan) { - SubPlan subplan; - int64_t startIndex = 0; - auto minHop = edge.range ? edge.range->min() : 1; - auto maxHop = edge.range ? 
edge.range->max() : 1; - - // Build first step - // In the case of 0 step, src node is the dst node, return the vertex directly - if (minHop == 0) { - subplan = *plan; - startIndex = 0; - // Get vertex - NG_RETURN_IF_ERROR(MatchSolver::appendFetchVertexPlan( - node.filter, matchCtx_->space, matchCtx_->qctx, &initialExpr_, inputVar_, subplan)); - } else { // Case 1 to n steps - startIndex = 1; - // Expand first step from src - NG_RETURN_IF_ERROR(expandStep(edge, dependency_, inputVar_, node.filter, &subplan)); - } - // No need to further expand if maxHop is the start Index - if (maxHop == startIndex) { - plan->root = subplan.root; - return Status::OK(); - } - // Result of first step expansion - PlanNode* firstStep = subplan.root; - - // Build Start node from first step - SubPlan loopBodyPlan; - PlanNode* startNode = StartNode::make(matchCtx_->qctx); - startNode->setOutputVar(firstStep->outputVar()); - startNode->setColNames(firstStep->colNames()); - loopBodyPlan.tail = startNode; - loopBodyPlan.root = startNode; - - // Construct loop body - NG_RETURN_IF_ERROR(expandStep(edge, - startNode, // dep - startNode->outputVar(), // inputVar - nullptr, - &loopBodyPlan)); - - NG_RETURN_IF_ERROR(collectData(startNode, // left join node - loopBodyPlan.root, // right join node - &firstStep, // passThrough - &subplan)); - // Union node - auto body = subplan.root; - - // Loop condition - auto condition = buildExpandCondition(body->outputVar(), startIndex, maxHop); - - // Create loop - auto* loop = Loop::make(matchCtx_->qctx, firstStep, body, condition); - - // Unionize the results of each expansion which are stored in the firstStep - // node - auto uResNode = UnionAllVersionVar::make(matchCtx_->qctx, loop); - uResNode->setInputVar(firstStep->outputVar()); - uResNode->setColNames({kPathStr}); - - subplan.root = uResNode; - plan->root = subplan.root; - return Status::OK(); -} - -// Build subplan: Project->Dedup->GetNeighbors->[Filter]->Project -Status Expand::expandStep(const 
EdgeInfo& edge, - PlanNode* dep, - const std::string& inputVar, - const Expression* nodeFilter, - SubPlan* plan) { - auto qctx = matchCtx_->qctx; - auto* pool = qctx->objPool(); - // Extract dst vid from input project node which output dataset format is: - // [v1,e1,...,vn,en] - SubPlan curr; - curr.root = dep; - MatchSolver::extractAndDedupVidColumn(qctx, &initialExpr_, dep, inputVar, curr); - // [GetNeighbors] - auto gn = GetNeighbors::make(qctx, curr.root, matchCtx_->space.id); - auto srcExpr = InputPropertyExpression::make(pool, kVid); - gn->setSrc(srcExpr); - gn->setVertexProps(genVertexProps()); - gn->setEdgeProps(genEdgeProps(edge)); - gn->setEdgeDirection(edge.direction); - - PlanNode* root = gn; - if (nodeFilter != nullptr) { - auto* newFilter = MatchSolver::rewriteLabel2Vertex(qctx, nodeFilter); - auto filterNode = Filter::make(matchCtx_->qctx, root, newFilter); - filterNode->setColNames(root->colNames()); - root = filterNode; - } - - if (edge.filter != nullptr) { - auto* newFilter = MatchSolver::rewriteLabel2Edge(qctx, edge.filter); - auto filterNode = Filter::make(qctx, root, newFilter); - filterNode->setColNames(root->colNames()); - root = filterNode; - } - - auto listColumns = saveObject(new YieldColumns); - listColumns->addColumn(new YieldColumn(buildPathExpr(pool), kPathStr)); - // [Project] - root = Project::make(qctx, root, listColumns); - root->setColNames({kPathStr}); - - plan->root = root; - plan->tail = curr.tail; - return Status::OK(); -} - -// Build subplan: DataJoin->Project->Filter -Status Expand::collectData(const PlanNode* joinLeft, - const PlanNode* joinRight, - PlanNode** passThrough, - SubPlan* plan) { - auto qctx = matchCtx_->qctx; - // [dataJoin] read start node (joinLeft) - auto join = SegmentsConnector::innerJoinSegments(qctx, joinLeft, joinRight); - auto lpath = folly::stringPrintf("%s_%d", kPathStr, 0); - auto rpath = folly::stringPrintf("%s_%d", kPathStr, 1); - join->setColNames({lpath, rpath}); - plan->tail = join; - - auto 
columns = saveObject(new YieldColumns); - auto listExpr = mergePathColumnsExpr(qctx->objPool(), lpath, rpath); - columns->addColumn(new YieldColumn(listExpr)); - // [Project] - auto project = Project::make(qctx, join, columns); - project->setColNames({kPathStr}); - // [Filter] - auto filter = MatchSolver::filtPathHasSameEdge(project, kPathStr, qctx); - // Update start node - filter->setOutputVar((*passThrough)->outputVar()); - plan->root = filter; - return Status::OK(); -} - -Status Expand::filterDatasetByPathLength(const EdgeInfo& edge, PlanNode* input, SubPlan* plan) { - auto qctx = matchCtx_->qctx; - auto* pool = qctx->objPool(); - - // Filter rows whose edges number less than min hop - auto args = ArgumentList::make(pool); - // Expr: length(relationships(p)) >= minHop - auto pathExpr = InputPropertyExpression::make(pool, kPathStr); - args->addArgument(pathExpr); - auto edgeExpr = FunctionCallExpression::make(pool, "length", args); - auto minHop = edge.range == nullptr ? 1 : edge.range->min(); - auto minHopExpr = ConstantExpression::make(pool, minHop); - auto expr = RelationalExpression::makeGE(pool, edgeExpr, minHopExpr); - - auto filter = Filter::make(qctx, input, expr); - filter->setColNames(input->colNames()); - plan->root = filter; - return Status::OK(); -} - -// loopSteps{startIndex} <= maxHop && ($lastStepResult == empty || -// size($lastStepResult) != 0) -Expression* Expand::buildExpandCondition(const std::string& lastStepResult, - int64_t startIndex, - int64_t maxHop) const { - VLOG(1) << "match expand maxHop: " << maxHop; - auto pool = matchCtx_->qctx->objPool(); - auto loopSteps = matchCtx_->qctx->vctx()->anonVarGen()->getVar(); - matchCtx_->qctx->ectx()->setValue(loopSteps, startIndex); - // ++loopSteps{startIndex} << maxHop - auto stepCondition = ExpressionUtils::stepCondition(pool, loopSteps, maxHop); - // lastStepResult == empty || size(lastStepReult) != 0 - auto* eqEmpty = RelationalExpression::makeEQ(pool, - VariableExpression::make(pool, 
lastStepResult), - ConstantExpression::make(pool, Value())); - auto neZero = ExpressionUtils::neZeroCondition(pool, lastStepResult); - auto* existValCondition = LogicalExpression::makeOr(pool, eqEmpty, neZero); - return LogicalExpression::makeAnd(pool, stepCondition, existValCondition); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/planner/match/Expand.h b/src/graph/planner/match/Expand.h deleted file mode 100644 index 9292173b5d0..00000000000 --- a/src/graph/planner/match/Expand.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef GRAPH_PLANNER_MATCH_EXPAND_H_ -#define GRAPH_PLANNER_MATCH_EXPAND_H_ - -#include "common/base/Base.h" -#include "graph/context/ast/CypherAstContext.h" -#include "graph/planner/Planner.h" -#include "graph/planner/plan/PlanNode.h" -#include "graph/util/ExpressionUtils.h" - -namespace nebula { -namespace graph { -/* - * The Expand was designed to handle the pattern expanding. 
- */ -class Expand final { - public: - Expand(MatchClauseContext* matchCtx, Expression* initialExpr) - : matchCtx_(matchCtx), initialExpr_(initialExpr) {} - - Expand* reversely() { - reversely_ = true; - return this; - } - - Expand* depends(PlanNode* dep) { - dependency_ = dep; - return this; - } - - Expand* inputVar(const std::string& inputVar) { - inputVar_ = inputVar; - return this; - } - - Status doExpand(const NodeInfo& node, const EdgeInfo& edge, SubPlan* plan); - - private: - Status expandSteps(const NodeInfo& node, const EdgeInfo& edge, SubPlan* plan); - - Status expandStep(const EdgeInfo& edge, - PlanNode* dep, - const std::string& inputVar, - const Expression* nodeFilter, - SubPlan* plan); - - Status collectData(const PlanNode* joinLeft, - const PlanNode* joinRight, - PlanNode** passThrough, - SubPlan* plan); - - Status filterDatasetByPathLength(const EdgeInfo& edge, PlanNode* input, SubPlan* plan); - - Expression* buildExpandCondition(const std::string& lastStepResult, - int64_t startIndex, - int64_t maxHop) const; - - template - T* saveObject(T* obj) const { - return matchCtx_->qctx->objPool()->add(obj); - } - - std::unique_ptr> genEdgeProps(const EdgeInfo& edge); - - MatchClauseContext* matchCtx_; - Expression* initialExpr_{nullptr}; - bool reversely_{false}; - PlanNode* dependency_{nullptr}; - std::string inputVar_; -}; -} // namespace graph -} // namespace nebula -#endif // GRAPH_PLANNER_MATCH_EXPAND_H_ diff --git a/src/graph/planner/match/LabelIndexSeek.cpp b/src/graph/planner/match/LabelIndexSeek.cpp index 07187484509..ae9e6473be6 100644 --- a/src/graph/planner/match/LabelIndexSeek.cpp +++ b/src/graph/planner/match/LabelIndexSeek.cpp @@ -170,21 +170,29 @@ StatusOr LabelIndexSeek::transformEdge(EdgeContext* edgeCtx) { auto* pool = qctx->objPool(); if (edgeCtx->scanInfo.direction == MatchEdge::Direction::BOTH) { - // merge the src,dst to one column - auto* yieldColumns = pool->makeAndAdd(); - auto* exprList = ExpressionList::make(pool); - 
exprList->add(ColumnExpression::make(pool, 0)); // src - exprList->add(ColumnExpression::make(pool, 1)); // dst - yieldColumns->addColumn(new YieldColumn(ListExpression::make(pool, exprList))); - auto* project = Project::make(qctx, scan, yieldColumns); - project->setColNames({kVid}); - - auto* unwindExpr = ColumnExpression::make(pool, 0); - auto* unwind = Unwind::make(matchClauseCtx->qctx, project, unwindExpr, kVid); - unwind->setColNames({"vidList", kVid}); - plan.root = unwind; + PlanNode* left = nullptr; + { + auto* yieldColumns = pool->makeAndAdd(); + yieldColumns->addColumn(new YieldColumn(InputPropertyExpression::make(pool, kSrc))); + left = Project::make(qctx, scan, yieldColumns); + left->setColNames({kVid}); + } + PlanNode* right = nullptr; + { + auto* yieldColumns = pool->makeAndAdd(); + yieldColumns->addColumn(new YieldColumn(InputPropertyExpression::make(pool, kDst))); + right = Project::make(qctx, scan, yieldColumns); + right->setColNames({kVid}); + } + + plan.root = Union::make(qctx, left, right); + plan.root->setColNames({kVid}); } + auto* dedup = Dedup::make(qctx, plan.root); + dedup->setColNames(plan.root->colNames()); + plan.root = dedup; + // initialize start expression in project node edgeCtx->initialExpr = VariablePropertyExpression::make(pool, "", kVid); return plan; diff --git a/src/graph/planner/match/MatchClausePlanner.cpp b/src/graph/planner/match/MatchClausePlanner.cpp index 4a191ab466f..7ff42c9914b 100644 --- a/src/graph/planner/match/MatchClausePlanner.cpp +++ b/src/graph/planner/match/MatchClausePlanner.cpp @@ -6,19 +6,99 @@ #include "graph/planner/match/MatchClausePlanner.h" #include "graph/context/ast/CypherAstContext.h" -#include "graph/planner/match/Expand.h" #include "graph/planner/match/MatchSolver.h" #include "graph/planner/match/SegmentsConnector.h" #include "graph/planner/match/StartVidFinder.h" #include "graph/planner/match/WhereClausePlanner.h" #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" 
+#include "graph/util/SchemaUtil.h" #include "graph/visitor/RewriteVisitor.h" using JoinStrategyPos = nebula::graph::InnerJoinStrategy::JoinPos; namespace nebula { namespace graph { +static std::vector genTraverseColNames(const std::vector& inputCols, + const NodeInfo& node, + const EdgeInfo& edge) { + auto cols = inputCols; + cols.emplace_back(node.alias); + cols.emplace_back(edge.alias); + return cols; +} + +static std::vector genAppendVColNames(const std::vector& inputCols, + const NodeInfo& node) { + auto cols = inputCols; + cols.emplace_back(node.alias); + return cols; +} + +static Expression* genNextTraverseStart(ObjectPool* pool, const EdgeInfo& edge) { + auto args = ArgumentList::make(pool); + args->addArgument(InputPropertyExpression::make(pool, edge.alias)); + return FunctionCallExpression::make(pool, "none_direct_dst", args); +} + +static Expression* genVertexFilter(const NodeInfo& node) { return node.filter; } + +static Expression* genEdgeFilter(const EdgeInfo& edge) { return edge.filter; } + +static std::unique_ptr> genVertexProps(const NodeInfo& node, + QueryContext* qctx, + GraphSpaceID spaceId) { + // TODO + UNUSED(node); + UNUSED(qctx); + UNUSED(spaceId); + return std::make_unique>(); +} + +static std::unique_ptr> genEdgeProps(const EdgeInfo& edge, + bool reversely, + QueryContext* qctx, + GraphSpaceID spaceId) { + auto edgeProps = std::make_unique>(); + for (auto edgeType : edge.edgeTypes) { + auto edgeSchema = qctx->schemaMng()->getEdgeSchema(spaceId, edgeType); + + switch (edge.direction) { + case Direction::OUT_EDGE: { + if (reversely) { + edgeType = -edgeType; + } + break; + } + case Direction::IN_EDGE: { + if (!reversely) { + edgeType = -edgeType; + } + break; + } + case Direction::BOTH: { + EdgeProp edgeProp; + edgeProp.set_type(-edgeType); + std::vector props{kSrc, kType, kRank, kDst}; + for (std::size_t i = 0; i < edgeSchema->getNumFields(); ++i) { + props.emplace_back(edgeSchema->getFieldName(i)); + } + 
edgeProp.set_props(std::move(props)); + edgeProps->emplace_back(std::move(edgeProp)); + break; + } + } + EdgeProp edgeProp; + edgeProp.set_type(edgeType); + std::vector props{kSrc, kType, kRank, kDst}; + for (std::size_t i = 0; i < edgeSchema->getNumFields(); ++i) { + props.emplace_back(edgeSchema->getFieldName(i)); + } + edgeProp.set_props(std::move(props)); + edgeProps->emplace_back(std::move(edgeProp)); + } + return edgeProps; +} StatusOr MatchClausePlanner::transform(CypherClauseContextBase* clauseCtx) { if (clauseCtx->kind != CypherClauseKind::kMatch) { @@ -35,7 +115,7 @@ StatusOr MatchClausePlanner::transform(CypherClauseContextBase* clauseC NG_RETURN_IF_ERROR(findStarts(matchClauseCtx, startFromEdge, startIndex, matchClausePlan)); NG_RETURN_IF_ERROR( expand(nodeInfos, edgeInfos, matchClauseCtx, startFromEdge, startIndex, matchClausePlan)); - NG_RETURN_IF_ERROR(projectColumnsBySymbols(matchClauseCtx, startIndex, matchClausePlan)); + NG_RETURN_IF_ERROR(projectColumnsBySymbols(matchClauseCtx, matchClausePlan)); NG_RETURN_IF_ERROR(appendFilterPlan(matchClauseCtx, matchClausePlan)); return matchClausePlan; } @@ -130,14 +210,9 @@ Status MatchClausePlanner::expandFromNode(const std::vector& nodeInfos // Pattern: ()-[]-...-(start)-...-[]-() NG_RETURN_IF_ERROR( rightExpandFromNode(nodeInfos, edgeInfos, matchClauseCtx, startIndex, subplan)); - auto left = subplan.root; - NG_RETURN_IF_ERROR( - leftExpandFromNode(nodeInfos, edgeInfos, matchClauseCtx, startIndex, var, subplan)); + NG_RETURN_IF_ERROR(leftExpandFromNode( + nodeInfos, edgeInfos, matchClauseCtx, startIndex, subplan.root->outputVar(), subplan)); - // Connect the left expand and right expand part. 
- auto right = subplan.root; - subplan.root = SegmentsConnector::innerJoinSegments( - matchClauseCtx->qctx, left, right, JoinStrategyPos::kStart, JoinStrategyPos::kStart); return Status::OK(); } @@ -147,48 +222,47 @@ Status MatchClausePlanner::leftExpandFromNode(const std::vector& nodeI size_t startIndex, std::string inputVar, SubPlan& subplan) { - std::vector joinColNames = { - folly::stringPrintf("%s_%lu", kPathStr, nodeInfos.size())}; + Expression* nextTraverseStart = nullptr; + auto qctx = matchClauseCtx->qctx; + if (startIndex == nodeInfos.size() - 1) { + nextTraverseStart = initialExpr_; + } else { + auto* pool = qctx->objPool(); + auto args = ArgumentList::make(pool); + args->addArgument(InputPropertyExpression::make(pool, nodeInfos[startIndex].alias)); + nextTraverseStart = FunctionCallExpression::make(pool, "id", args); + } + auto spaceId = matchClauseCtx->space.id; + bool reversely = true; for (size_t i = startIndex; i > 0; --i) { - auto left = subplan.root; - auto status = - std::make_unique(matchClauseCtx, i == startIndex ? 
initialExpr_->clone() : nullptr) - ->depends(subplan.root) - ->inputVar(inputVar) - ->reversely() - ->doExpand(nodeInfos[i], edgeInfos[i - 1], &subplan); - if (!status.ok()) { - return status; - } - if (i < startIndex) { - auto right = subplan.root; - VLOG(1) << "left: " << folly::join(",", left->colNames()) - << " right: " << folly::join(",", right->colNames()); - subplan.root = SegmentsConnector::innerJoinSegments(matchClauseCtx->qctx, left, right); - joinColNames.emplace_back(folly::stringPrintf("%s_%lu", kPathStr, nodeInfos.size() + i)); - subplan.root->setColNames(joinColNames); - } - inputVar = subplan.root->outputVar(); + auto& node = nodeInfos[i]; + auto& edge = edgeInfos[i - 1]; + auto traverse = Traverse::make(qctx, subplan.root, spaceId); + traverse->setSrc(nextTraverseStart); + traverse->setVertexProps(genVertexProps(node, qctx, spaceId)); + traverse->setEdgeProps(genEdgeProps(edge, reversely, qctx, spaceId)); + traverse->setVertexFilter(genVertexFilter(node)); + traverse->setEdgeFilter(genEdgeFilter(edge)); + traverse->setEdgeDirection(edge.direction); + traverse->setColNames(genTraverseColNames(subplan.root->colNames(), node, edge)); + traverse->setStepRange(edge.range); + traverse->setDedup(); + subplan.root = traverse; + nextTraverseStart = genNextTraverseStart(qctx->objPool(), edge); + inputVar = traverse->outputVar(); } VLOG(1) << subplan; - auto left = subplan.root; - auto* initialExprCopy = initialExpr_->clone(); - NG_RETURN_IF_ERROR( - MatchSolver::appendFetchVertexPlan(nodeInfos.front().filter, - matchClauseCtx->space, - matchClauseCtx->qctx, - edgeInfos.empty() ? 
&initialExprCopy : nullptr, - subplan)); - if (!edgeInfos.empty()) { - auto right = subplan.root; - VLOG(1) << "left: " << folly::join(",", left->colNames()) - << " right: " << folly::join(",", right->colNames()); - subplan.root = SegmentsConnector::innerJoinSegments(matchClauseCtx->qctx, left, right); - joinColNames.emplace_back( - folly::stringPrintf("%s_%lu", kPathStr, nodeInfos.size() + startIndex)); - subplan.root->setColNames(joinColNames); - } + auto& node = nodeInfos.front(); + auto appendV = AppendVertices::make(qctx, subplan.root, spaceId); + auto vertexProps = SchemaUtil::getAllVertexProp(qctx, spaceId, true); + NG_RETURN_IF_ERROR(vertexProps); + appendV->setVertexProps(std::move(vertexProps).value()); + appendV->setSrc(nextTraverseStart); + appendV->setVertexFilter(genVertexFilter(node)); + appendV->setColNames(genAppendVColNames(subplan.root->colNames(), node)); + appendV->setDedup(); + subplan.root = appendV; VLOG(1) << subplan; return Status::OK(); @@ -199,44 +273,40 @@ Status MatchClausePlanner::rightExpandFromNode(const std::vector& node MatchClauseContext* matchClauseCtx, size_t startIndex, SubPlan& subplan) { - std::vector joinColNames = {folly::stringPrintf("%s_%lu", kPathStr, startIndex)}; + auto inputVar = subplan.root->outputVar(); + auto qctx = matchClauseCtx->qctx; + auto spaceId = matchClauseCtx->space.id; + Expression* nextTraverseStart = initialExpr_; + bool reversely = false; for (size_t i = startIndex; i < edgeInfos.size(); ++i) { - auto left = subplan.root; - auto status = - std::make_unique(matchClauseCtx, i == startIndex ? 
initialExpr_->clone() : nullptr) - ->depends(subplan.root) - ->inputVar(subplan.root->outputVar()) - ->doExpand(nodeInfos[i], edgeInfos[i], &subplan); - if (!status.ok()) { - return status; - } - if (i > startIndex) { - auto right = subplan.root; - VLOG(1) << "left: " << folly::join(",", left->colNames()) - << " right: " << folly::join(",", right->colNames()); - subplan.root = SegmentsConnector::innerJoinSegments(matchClauseCtx->qctx, left, right); - joinColNames.emplace_back(folly::stringPrintf("%s_%lu", kPathStr, i)); - subplan.root->setColNames(joinColNames); - } + auto& node = nodeInfos[i]; + auto& edge = edgeInfos[i]; + auto traverse = Traverse::make(qctx, subplan.root, spaceId); + traverse->setSrc(nextTraverseStart); + traverse->setVertexProps(genVertexProps(node, qctx, spaceId)); + traverse->setEdgeProps(genEdgeProps(edge, reversely, qctx, spaceId)); + traverse->setVertexFilter(genVertexFilter(node)); + traverse->setEdgeFilter(genEdgeFilter(edge)); + traverse->setEdgeDirection(edge.direction); + traverse->setColNames(genTraverseColNames(subplan.root->colNames(), node, edge)); + traverse->setStepRange(edge.range); + traverse->setDedup(); + subplan.root = traverse; + nextTraverseStart = genNextTraverseStart(qctx->objPool(), edge); + inputVar = traverse->outputVar(); } VLOG(1) << subplan; - auto left = subplan.root; - auto* initialExprCopy = initialExpr_->clone(); - NG_RETURN_IF_ERROR( - MatchSolver::appendFetchVertexPlan(nodeInfos.back().filter, - matchClauseCtx->space, - matchClauseCtx->qctx, - edgeInfos.empty() ? 
&initialExprCopy : nullptr, - subplan)); - if (!edgeInfos.empty()) { - auto right = subplan.root; - VLOG(1) << "left: " << folly::join(",", left->colNames()) - << " right: " << folly::join(",", right->colNames()); - subplan.root = SegmentsConnector::innerJoinSegments(matchClauseCtx->qctx, left, right); - joinColNames.emplace_back(folly::stringPrintf("%s_%lu", kPathStr, edgeInfos.size())); - subplan.root->setColNames(joinColNames); - } + auto& node = nodeInfos.back(); + auto appendV = AppendVertices::make(qctx, subplan.root, spaceId); + auto vertexProps = SchemaUtil::getAllVertexProp(qctx, spaceId, true); + NG_RETURN_IF_ERROR(vertexProps); + appendV->setVertexProps(std::move(vertexProps).value()); + appendV->setSrc(nextTraverseStart); + appendV->setVertexFilter(genVertexFilter(node)); + appendV->setColNames(genAppendVColNames(subplan.root->colNames(), node)); + appendV->setDedup(); + subplan.root = appendV; VLOG(1) << subplan; return Status::OK(); @@ -251,146 +321,80 @@ Status MatchClausePlanner::expandFromEdge(const std::vector& nodeInfos } Status MatchClausePlanner::projectColumnsBySymbols(MatchClauseContext* matchClauseCtx, - size_t startIndex, SubPlan& plan) { auto qctx = matchClauseCtx->qctx; auto& nodeInfos = matchClauseCtx->nodeInfos; auto& edgeInfos = matchClauseCtx->edgeInfos; - auto input = plan.root; - const auto& inColNames = input->colNames(); auto columns = qctx->objPool()->add(new YieldColumns); std::vector colNames; - auto addNode = [&, this](size_t i) { - auto& nodeInfo = nodeInfos[i]; + auto addNode = [this, columns, &colNames, matchClauseCtx](auto& nodeInfo) { if (!nodeInfo.alias.empty() && !nodeInfo.anonymous) { - if (i >= startIndex) { - columns->addColumn( - buildVertexColumn(matchClauseCtx, inColNames[i - startIndex], nodeInfo.alias)); - } else if (startIndex == (nodeInfos.size() - 1)) { - columns->addColumn( - buildVertexColumn(matchClauseCtx, inColNames[startIndex - i], nodeInfo.alias)); - } else { - columns->addColumn( - 
buildVertexColumn(matchClauseCtx, inColNames[nodeInfos.size() - i], nodeInfo.alias)); - } + columns->addColumn(buildVertexColumn(matchClauseCtx, nodeInfo.alias)); colNames.emplace_back(nodeInfo.alias); } }; - for (size_t i = 0; i < edgeInfos.size(); i++) { - VLOG(1) << "colSize: " << inColNames.size() << "i: " << i << " nodesize: " << nodeInfos.size() - << " start: " << startIndex; - addNode(i); - auto& edgeInfo = edgeInfos[i]; + auto addEdge = [this, columns, &colNames, matchClauseCtx](auto& edgeInfo) { if (!edgeInfo.alias.empty() && !edgeInfo.anonymous) { - if (i >= startIndex) { - columns->addColumn(buildEdgeColumn(matchClauseCtx, inColNames[i - startIndex], edgeInfo)); - } else if (startIndex == (nodeInfos.size() - 1)) { - columns->addColumn( - buildEdgeColumn(matchClauseCtx, inColNames[edgeInfos.size() - 1 - i], edgeInfo)); - } else { - columns->addColumn( - buildEdgeColumn(matchClauseCtx, inColNames[edgeInfos.size() - i], edgeInfo)); - } + columns->addColumn(buildEdgeColumn(matchClauseCtx, edgeInfo)); colNames.emplace_back(edgeInfo.alias); } + }; + + for (size_t i = 0; i < edgeInfos.size(); i++) { + addNode(nodeInfos[i]); + addEdge(edgeInfos[i]); } // last vertex DCHECK(!nodeInfos.empty()); - addNode(nodeInfos.size() - 1); + addNode(nodeInfos.back()); const auto& aliases = matchClauseCtx->aliasesGenerated; auto iter = std::find_if(aliases.begin(), aliases.end(), [](const auto& alias) { return alias.second == AliasType::kPath; }); - std::string alias = iter != aliases.end() ? 
iter->first : qctx->vctx()->anonColGen()->getCol(); - columns->addColumn( - buildPathColumn(matchClauseCtx, alias, startIndex, inColNames, nodeInfos.size())); - colNames.emplace_back(alias); + if (iter != aliases.end()) { + auto& alias = iter->first; + columns->addColumn(buildPathColumn(matchClauseCtx, alias)); + colNames.emplace_back(alias); + } - auto project = Project::make(qctx, input, columns); + auto project = Project::make(qctx, plan.root, columns); project->setColNames(std::move(colNames)); - plan.root = MatchSolver::filtPathHasSameEdge(project, alias, qctx); + plan.root = project; VLOG(1) << plan; return Status::OK(); } YieldColumn* MatchClausePlanner::buildVertexColumn(MatchClauseContext* matchClauseCtx, - const std::string& colName, const std::string& alias) const { - auto* pool = matchClauseCtx->qctx->objPool(); - auto colExpr = InputPropertyExpression::make(pool, colName); - // startNode(path) => head node of path - auto args = ArgumentList::make(pool); - args->addArgument(colExpr); - auto firstVertexExpr = FunctionCallExpression::make(pool, "startNode", args); - return new YieldColumn(firstVertexExpr, alias); + return new YieldColumn(InputPropertyExpression::make(matchClauseCtx->qctx->objPool(), alias), + alias); } YieldColumn* MatchClausePlanner::buildEdgeColumn(MatchClauseContext* matchClauseCtx, - const std::string& colName, EdgeInfo& edge) const { auto* pool = matchClauseCtx->qctx->objPool(); - auto colExpr = InputPropertyExpression::make(pool, colName); - // relationships(p) - auto args = ArgumentList::make(pool); - args->addArgument(colExpr); - auto relExpr = FunctionCallExpression::make(pool, "relationships", args); Expression* expr = nullptr; - if (edge.range != nullptr) { - expr = relExpr; + if (edge.range == nullptr) { + expr = SubscriptExpression::make( + pool, InputPropertyExpression::make(pool, edge.alias), ConstantExpression::make(pool, 0)); } else { - // Get first edge in path list [e1, e2, ...] 
- auto idxExpr = ConstantExpression::make(pool, 0); - auto subExpr = SubscriptExpression::make(pool, relExpr, idxExpr); - expr = subExpr; + auto* args = ArgumentList::make(pool); + args->addArgument(VariableExpression::make(pool, "e")); + auto* filter = FunctionCallExpression::make(pool, "is_edge", args); + expr = ListComprehensionExpression::make( + pool, "e", InputPropertyExpression::make(pool, edge.alias), filter); } return new YieldColumn(expr, edge.alias); } YieldColumn* MatchClausePlanner::buildPathColumn(MatchClauseContext* matchClauseCtx, - const std::string& alias, - size_t startIndex, - const std::vector colNames, - size_t nodeInfoSize) const { - auto colSize = colNames.size(); - DCHECK((nodeInfoSize == colSize) || (nodeInfoSize + 1 == colSize)); - size_t bound = 0; - if (colSize > nodeInfoSize) { - bound = colSize - startIndex - 1; - } else if (startIndex == (nodeInfoSize - 1)) { - bound = 0; - } else { - bound = colSize - startIndex; - } - auto* pool = matchClauseCtx->qctx->objPool(); - auto rightExpandPath = PathBuildExpression::make(pool); - for (size_t i = 0; i < bound; ++i) { - rightExpandPath->add(InputPropertyExpression::make(pool, colNames[i])); - } - - auto leftExpandPath = PathBuildExpression::make(pool); - for (size_t i = bound; i < colNames.size(); ++i) { - leftExpandPath->add(InputPropertyExpression::make(pool, colNames[i])); - } - - auto finalPath = PathBuildExpression::make(pool); - if (leftExpandPath->size() != 0) { - auto args = ArgumentList::make(pool); - args->addArgument(leftExpandPath); - auto reversePath = FunctionCallExpression::make(pool, "reversePath", args); - if (rightExpandPath->size() == 0) { - return new YieldColumn(reversePath, alias); - } - finalPath->add(reversePath); - } - if (rightExpandPath->size() != 0) { - finalPath->add(rightExpandPath); - } - return new YieldColumn(finalPath, alias); + const std::string& alias) const { + return new YieldColumn(matchClauseCtx->pathBuild, alias); } Status 
MatchClausePlanner::appendFilterPlan(MatchClauseContext* matchClauseCtx, SubPlan& subplan) { diff --git a/src/graph/planner/match/MatchClausePlanner.h b/src/graph/planner/match/MatchClausePlanner.h index 151e774a27a..19b7c860701 100644 --- a/src/graph/planner/match/MatchClausePlanner.h +++ b/src/graph/planner/match/MatchClausePlanner.h @@ -59,23 +59,14 @@ class MatchClausePlanner final : public CypherClausePlanner { size_t startIndex, SubPlan& subplan); - Status projectColumnsBySymbols(MatchClauseContext* matchClauseCtx, - size_t startIndex, - SubPlan& plan); + Status projectColumnsBySymbols(MatchClauseContext* matchClauseCtx, SubPlan& plan); YieldColumn* buildVertexColumn(MatchClauseContext* matchClauseCtx, - const std::string& colName, const std::string& alias) const; - YieldColumn* buildEdgeColumn(MatchClauseContext* matchClauseCtx, - const std::string& colName, - EdgeInfo& edge) const; + YieldColumn* buildEdgeColumn(MatchClauseContext* matchClauseCtx, EdgeInfo& edge) const; - YieldColumn* buildPathColumn(MatchClauseContext* matchClauseCtx, - const std::string& alias, - size_t startIndex, - const std::vector colNames, - size_t nodeInfoSize) const; + YieldColumn* buildPathColumn(MatchClauseContext* matchClauseCtx, const std::string& alias) const; Status appendFilterPlan(MatchClauseContext* matchClauseCtx, SubPlan& subplan); diff --git a/src/graph/planner/match/MatchSolver.cpp b/src/graph/planner/match/MatchSolver.cpp index b66ec18312e..92e9832199d 100644 --- a/src/graph/planner/match/MatchSolver.cpp +++ b/src/graph/planner/match/MatchSolver.cpp @@ -276,7 +276,7 @@ Status MatchSolver::appendFetchVertexPlan(const Expression* nodeFilter, extractAndDedupVidColumn(qctx, initialExpr, plan.root, inputVar, plan); auto srcExpr = InputPropertyExpression::make(pool, kVid); // [Get vertices] - auto props = SchemaUtil::getAllVertexProp(qctx, space, true); + auto props = SchemaUtil::getAllVertexProp(qctx, space.id, true); NG_RETURN_IF_ERROR(props); auto gv = 
GetVertices::make(qctx, plan.root, space.id, srcExpr, std::move(props).value(), {}); diff --git a/src/graph/planner/match/PropIndexSeek.cpp b/src/graph/planner/match/PropIndexSeek.cpp index e1c2d47b785..257f24f7e46 100644 --- a/src/graph/planner/match/PropIndexSeek.cpp +++ b/src/graph/planner/match/PropIndexSeek.cpp @@ -89,21 +89,29 @@ StatusOr PropIndexSeek::transformEdge(EdgeContext* edgeCtx) { auto* pool = qctx->objPool(); if (edgeCtx->scanInfo.direction == MatchEdge::Direction::BOTH) { - // merge the src,dst to one column - auto* yieldColumns = pool->makeAndAdd(); - auto* exprList = ExpressionList::make(pool); - exprList->add(ColumnExpression::make(pool, 0)); // src - exprList->add(ColumnExpression::make(pool, 1)); // dst - yieldColumns->addColumn(new YieldColumn(ListExpression::make(pool, exprList))); - auto* project = Project::make(qctx, scan, yieldColumns); - project->setColNames({kVid}); - - auto* unwindExpr = ColumnExpression::make(pool, 0); - auto* unwind = Unwind::make(qctx, project, unwindExpr, kVid); - unwind->setColNames({"vidList", kVid}); - plan.root = unwind; + PlanNode* left = nullptr; + { + auto* yieldColumns = pool->makeAndAdd(); + yieldColumns->addColumn(new YieldColumn(InputPropertyExpression::make(pool, kSrc))); + left = Project::make(qctx, scan, yieldColumns); + left->setColNames({kVid}); + } + PlanNode* right = nullptr; + { + auto* yieldColumns = pool->makeAndAdd(); + yieldColumns->addColumn(new YieldColumn(InputPropertyExpression::make(pool, kDst))); + right = Project::make(qctx, scan, yieldColumns); + right->setColNames({kVid}); + } + + plan.root = Union::make(qctx, left, right); + plan.root->setColNames({kVid}); } + auto* dedup = Dedup::make(qctx, plan.root); + dedup->setColNames(plan.root->colNames()); + plan.root = dedup; + // initialize start expression in project edge edgeCtx->initialExpr = VariablePropertyExpression::make(pool, "", kVid); return plan; diff --git a/src/graph/planner/match/VertexIdSeek.cpp 
b/src/graph/planner/match/VertexIdSeek.cpp index 1b27b877319..9d0df2a085d 100644 --- a/src/graph/planner/match/VertexIdSeek.cpp +++ b/src/graph/planner/match/VertexIdSeek.cpp @@ -7,6 +7,7 @@ #include "graph/planner/match/MatchSolver.h" #include "graph/planner/plan/Logic.h" +#include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" #include "graph/util/SchemaUtil.h" #include "graph/visitor/VidExtractVisitor.h" @@ -53,34 +54,16 @@ bool VertexIdSeek::matchNode(NodeContext *nodeCtx) { return false; } -std::pair VertexIdSeek::listToAnnoVarVid(QueryContext *qctx, - const List &list) { +std::string VertexIdSeek::listToAnnoVarVid(QueryContext *qctx, const List &list) { auto input = qctx->vctx()->anonVarGen()->getVar(); DataSet vids({kVid}); - QueryExpressionContext dummy; for (auto &v : list.values) { vids.emplace_back(Row({std::move(v)})); } qctx->ectx()->setResult(input, ResultBuilder().value(Value(std::move(vids))).build()); - auto *pool = qctx->objPool(); - auto *src = VariablePropertyExpression::make(pool, input, kVid); - return std::pair(input, src); -} - -std::pair VertexIdSeek::constToAnnoVarVid(QueryContext *qctx, - const Value &v) { - auto input = qctx->vctx()->anonVarGen()->getVar(); - DataSet vids({kVid}); - QueryExpressionContext dummy; - vids.emplace_back(Row({v})); - - qctx->ectx()->setResult(input, ResultBuilder().value(Value(std::move(vids))).build()); - - auto *pool = qctx->objPool(); - auto *src = VariablePropertyExpression::make(pool, input, kVid); - return std::pair(input, src); + return input; } StatusOr VertexIdSeek::transformNode(NodeContext *nodeCtx) { @@ -88,16 +71,19 @@ StatusOr VertexIdSeek::transformNode(NodeContext *nodeCtx) { auto *matchClauseCtx = nodeCtx->matchClauseCtx; auto *qctx = matchClauseCtx->qctx; - QueryExpressionContext dummy; - std::pair vidsResult = listToAnnoVarVid(qctx, nodeCtx->ids); + std::string inputVar = listToAnnoVarVid(qctx, nodeCtx->ids); auto *passThrough = PassThroughNode::make(qctx, nullptr); - 
passThrough->setOutputVar(vidsResult.first); + passThrough->setOutputVar(inputVar); passThrough->setColNames({kVid}); - plan.root = passThrough; + + auto *dedup = Dedup::make(qctx, passThrough); + dedup->setColNames({kVid}); + + plan.root = dedup; plan.tail = passThrough; - nodeCtx->initialExpr = vidsResult.second; + nodeCtx->initialExpr = InputPropertyExpression::make(qctx->objPool(), kVid); return plan; } diff --git a/src/graph/planner/match/VertexIdSeek.h b/src/graph/planner/match/VertexIdSeek.h index 891b3b2b8c6..2fd58a172aa 100644 --- a/src/graph/planner/match/VertexIdSeek.h +++ b/src/graph/planner/match/VertexIdSeek.h @@ -31,9 +31,7 @@ class VertexIdSeek final : public StartVidFinder { StatusOr transformEdge(EdgeContext* edgeCtx) override; - std::pair listToAnnoVarVid(QueryContext* qctx, const List& list); - - std::pair constToAnnoVarVid(QueryContext* qctx, const Value& v); + std::string listToAnnoVarVid(QueryContext* qctx, const List& list); private: VertexIdSeek() = default; diff --git a/src/graph/planner/ngql/PathPlanner.cpp b/src/graph/planner/ngql/PathPlanner.cpp index 00b683627e6..f6f7843788c 100644 --- a/src/graph/planner/ngql/PathPlanner.cpp +++ b/src/graph/planner/ngql/PathPlanner.cpp @@ -409,7 +409,7 @@ PlanNode* PathPlanner::buildVertexPlan(PlanNode* dep, const std::string& input) idArgs->addArgument(ColumnExpression::make(pool, 1)); auto* src = FunctionCallExpression::make(pool, "id", idArgs); // get all vertexprop - auto vertexProp = SchemaUtil::getAllVertexProp(qctx, pathCtx_->space, true); + auto vertexProp = SchemaUtil::getAllVertexProp(qctx, pathCtx_->space.id, true); auto* getVertices = GetVertices::make( qctx, unwind, pathCtx_->space.id, src, std::move(vertexProp).value(), {}, true); diff --git a/src/graph/planner/ngql/SubgraphPlanner.cpp b/src/graph/planner/ngql/SubgraphPlanner.cpp index 339724b15bd..ce7a583701c 100644 --- a/src/graph/planner/ngql/SubgraphPlanner.cpp +++ b/src/graph/planner/ngql/SubgraphPlanner.cpp @@ -55,7 +55,7 @@ 
StatusOr SubgraphPlanner::nSteps(SubPlan& startVidPlan, const std::stri auto* startNode = StartNode::make(qctx); bool getVertexProp = subgraphCtx_->withProp && subgraphCtx_->getVertexProp; - auto vertexProps = SchemaUtil::getAllVertexProp(qctx, space, getVertexProp); + auto vertexProps = SchemaUtil::getAllVertexProp(qctx, space.id, getVertexProp); NG_RETURN_IF_ERROR(vertexProps); auto edgeProps = buildEdgeProps(); NG_RETURN_IF_ERROR(edgeProps); @@ -93,7 +93,7 @@ StatusOr SubgraphPlanner::zeroStep(SubPlan& startVidPlan, const std::st const auto& space = subgraphCtx_->space; auto* pool = qctx->objPool(); // get all vertexProp - auto vertexProp = SchemaUtil::getAllVertexProp(qctx, space, subgraphCtx_->withProp); + auto vertexProp = SchemaUtil::getAllVertexProp(qctx, space.id, subgraphCtx_->withProp); NG_RETURN_IF_ERROR(vertexProp); auto* getVertex = GetVertices::make(qctx, startVidPlan.root, diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index 6b0cfcfc8ad..587041ee401 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -276,7 +276,6 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "Download"; case Kind::kIngest: return "Ingest"; - // no default so the compiler will warning when lack case Kind::kShowSessions: return "ShowSessions"; case Kind::kUpdateSession: @@ -285,6 +284,10 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "ShowQueries"; case Kind::kKillQuery: return "KillQuery"; + case Kind::kTraverse: + return "Traverse"; + case Kind::kAppendVertices: + return "AppendVertices"; // no default so the compiler will warning when lack } LOG(FATAL) << "Impossible kind plan node " << static_cast(kind); diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index d81bf638242..83ce1c9e1be 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -27,6 +27,9 @@ class PlanNode { kGetNeighbors, kGetVertices, 
kGetEdges, + kTraverse, + kAppendVertices, + // ------------------ // TODO(yee): refactor in logical plan kIndexScan, diff --git a/src/graph/planner/plan/Query.cpp b/src/graph/planner/plan/Query.cpp index 07954678eee..988f8b33245 100644 --- a/src/graph/planner/plan/Query.cpp +++ b/src/graph/planner/plan/Query.cpp @@ -601,5 +601,52 @@ void UnionAllVersionVar::cloneMembers(const UnionAllVersionVar& f) { SingleInputNode::cloneMembers(f); } +Traverse* Traverse::clone() const { + auto newGN = Traverse::make(qctx_, nullptr, space_); + newGN->cloneMembers(*this); + return newGN; +} + +void Traverse::cloneMembers(const Traverse& g) { + GetNeighbors::cloneMembers(g); + + setStepRange(g.range_); + setVertexFilter(g.vFilter_->clone()); + setEdgeFilter(g.eFilter_->clone()); +} + +std::unique_ptr Traverse::explain() const { + auto desc = GetNeighbors::explain(); + if (range_ != nullptr) { + addDescription("steps", range_->toString(), desc.get()); + } + if (vFilter_ != nullptr) { + addDescription("vertex filter", vFilter_->toString(), desc.get()); + } + if (eFilter_ != nullptr) { + addDescription("edge filter", eFilter_->toString(), desc.get()); + } + return desc; +} + +AppendVertices* AppendVertices::clone() const { + auto newAV = AppendVertices::make(qctx_, nullptr, space_); + newAV->cloneMembers(*this); + return newAV; +} + +void AppendVertices::cloneMembers(const AppendVertices& a) { + GetVertices::cloneMembers(a); + + setVertexFilter(a.vFilter_->clone()); +} + +std::unique_ptr AppendVertices::explain() const { + auto desc = GetVertices::explain(); + if (vFilter_ != nullptr) { + addDescription("vertex filter", vFilter_->toString(), desc.get()); + } + return desc; +} } // namespace graph } // namespace nebula diff --git a/src/graph/planner/plan/Query.h b/src/graph/planner/plan/Query.h index b62622bb0e4..13629bf2ef5 100644 --- a/src/graph/planner/plan/Query.h +++ b/src/graph/planner/plan/Query.h @@ -120,10 +120,10 @@ using Direction = nebula::storage::cpp2::EdgeDirection; /** 
* Get neighbors' property */ -class GetNeighbors final : public Explore { +class GetNeighbors : public Explore { public: static GetNeighbors* make(QueryContext* qctx, PlanNode* input, GraphSpaceID space) { - return qctx->objPool()->add(new GetNeighbors(qctx, input, space)); + return qctx->objPool()->add(new GetNeighbors(qctx, Kind::kGetNeighbors, input, space)); } static GetNeighbors* make(QueryContext* qctx, @@ -198,15 +198,15 @@ class GetNeighbors final : public Explore { PlanNode* clone() const override; std::unique_ptr explain() const override; - private: - GetNeighbors(QueryContext* qctx, PlanNode* input, GraphSpaceID space) - : Explore(qctx, Kind::kGetNeighbors, input, space) { + protected: + GetNeighbors(QueryContext* qctx, Kind kind, PlanNode* input, GraphSpaceID space) + : Explore(qctx, kind, input, space) { setLimit(-1); } - private: void cloneMembers(const GetNeighbors&); + private: Expression* src_{nullptr}; std::vector edgeTypes_; storage::cpp2::EdgeDirection edgeDirection_{Direction::OUT_EDGE}; @@ -220,7 +220,7 @@ class GetNeighbors final : public Explore { /** * Get property with given vertex keys. 
*/ -class GetVertices final : public Explore { +class GetVertices : public Explore { public: static GetVertices* make(QueryContext* qctx, PlanNode* input, @@ -233,6 +233,7 @@ class GetVertices final : public Explore { int64_t limit = std::numeric_limits::max(), Expression* filter = nullptr) { return qctx->objPool()->add(new GetVertices(qctx, + Kind::kGetVertices, input, space, src, @@ -259,8 +260,9 @@ class GetVertices final : public Explore { PlanNode* clone() const override; std::unique_ptr explain() const override; - private: + protected: GetVertices(QueryContext* qctx, + Kind kind, PlanNode* input, GraphSpaceID space, Expression* src, @@ -270,7 +272,7 @@ class GetVertices final : public Explore { std::vector orderBy, int64_t limit, Expression* filter) - : Explore(qctx, Kind::kGetVertices, input, space, dedup, limit, filter, std::move(orderBy)), + : Explore(qctx, kind, input, space, dedup, limit, filter, std::move(orderBy)), src_(src), props_(std::move(props)), exprs_(std::move(exprs)) {} @@ -1146,6 +1148,110 @@ class UnionAllVersionVar final : public SingleInputNode { void cloneMembers(const UnionAllVersionVar&); }; +class Traverse final : public GetNeighbors { + public: + using VertexProps = std::unique_ptr>; + using EdgeProps = std::unique_ptr>; + using StatProps = std::unique_ptr>; + using Exprs = std::unique_ptr>; + + static Traverse* make(QueryContext* qctx, PlanNode* input, GraphSpaceID space) { + return qctx->objPool()->add(new Traverse(qctx, input, space)); + } + + static Traverse* make(QueryContext* qctx, + PlanNode* input, + GraphSpaceID space, + Expression* src, + std::vector edgeTypes, + storage::cpp2::EdgeDirection edgeDirection, + VertexProps&& vertexProps, + EdgeProps&& edgeProps, + StatProps&& statProps, + Exprs&& exprs, + bool dedup = false, + bool random = false, + std::vector orderBy = {}, + int64_t limit = -1, + Expression* filter = nullptr) { + auto traverse = make(qctx, input, space); + traverse->setSrc(src); + 
traverse->setEdgeTypes(std::move(edgeTypes)); + traverse->setEdgeDirection(edgeDirection); + traverse->setVertexProps(std::move(vertexProps)); + traverse->setEdgeProps(std::move(edgeProps)); + traverse->setExprs(std::move(exprs)); + traverse->setStatProps(std::move(statProps)); + traverse->setRandom(random); + traverse->setDedup(dedup); + traverse->setOrderBy(std::move(orderBy)); + traverse->setLimit(limit); + traverse->setFilter(std::move(filter)); + return traverse; + } + + std::unique_ptr explain() const override; + + Traverse* clone() const override; + + MatchStepRange* stepRange() const { return range_; } + + Expression* vFilter() const { return vFilter_; } + + Expression* eFilter() const { return eFilter_; } + + void setStepRange(MatchStepRange* range) { range_ = range; } + + void setVertexFilter(Expression* vFilter) { vFilter_ = vFilter; } + + void setEdgeFilter(Expression* eFilter) { eFilter_ = eFilter; } + + private: + Traverse(QueryContext* qctx, PlanNode* input, GraphSpaceID space) + : GetNeighbors(qctx, Kind::kTraverse, input, space) { + setLimit(-1); + } + + private: + void cloneMembers(const Traverse& g); + + MatchStepRange* range_{nullptr}; + Expression* vFilter_{nullptr}; + Expression* eFilter_{nullptr}; +}; + +class AppendVertices final : public GetVertices { + public: + static AppendVertices* make(QueryContext* qctx, PlanNode* input, GraphSpaceID space) { + return qctx->objPool()->add(new AppendVertices(qctx, input, space)); + } + + std::unique_ptr explain() const override; + + AppendVertices* clone() const override; + + Expression* vFilter() const { return vFilter_; } + + void setVertexFilter(Expression* vFilter) { vFilter_ = vFilter; } + + private: + AppendVertices(QueryContext* qctx, PlanNode* input, GraphSpaceID space) + : GetVertices(qctx, + Kind::kAppendVertices, + input, + space, + nullptr, + nullptr, + nullptr, + false, + {}, + 0, + nullptr) {} + + void cloneMembers(const AppendVertices& a); + + Expression* vFilter_; +}; } // namespace 
graph } // namespace nebula #endif // GRAPH_PLANNER_PLAN_QUERY_H_ diff --git a/src/graph/util/SchemaUtil.cpp b/src/graph/util/SchemaUtil.cpp index a19e381e4ea..8dbf5eb593b 100644 --- a/src/graph/util/SchemaUtil.cpp +++ b/src/graph/util/SchemaUtil.cpp @@ -322,9 +322,9 @@ bool SchemaUtil::isValidVid(const Value &value) { } StatusOr>> SchemaUtil::getAllVertexProp( - QueryContext *qctx, const SpaceInfo &space, bool withProp) { + QueryContext *qctx, GraphSpaceID spaceId, bool withProp) { // Get all tags in the space - const auto allTagsResult = qctx->schemaMng()->getAllLatestVerTagSchema(space.id); + const auto allTagsResult = qctx->schemaMng()->getAllLatestVerTagSchema(spaceId); NG_RETURN_IF_ERROR(allTagsResult); // allTags: std::unordered_map> diff --git a/src/graph/util/SchemaUtil.h b/src/graph/util/SchemaUtil.h index 3e6f6f13af4..b32e461d130 100644 --- a/src/graph/util/SchemaUtil.h +++ b/src/graph/util/SchemaUtil.h @@ -61,7 +61,7 @@ class SchemaUtil final { // Fetch all tags in the space and retrieve props from tags // only take _tag when withProp is false static StatusOr>> getAllVertexProp(QueryContext* qctx, - const SpaceInfo& space, + GraphSpaceID spaceId, bool withProp); // retrieve prop from specific edgetypes diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index 8e36383c309..c2c93d8d024 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -138,10 +138,10 @@ Status MatchValidator::buildPathExpr(const MatchPath *path, auto *pool = qctx_->objPool(); auto pathBuild = PathBuildExpression::make(pool); for (size_t i = 0; i < edgeInfos.size(); ++i) { - pathBuild->add(VariablePropertyExpression::make(pool, "", nodeInfos[i].alias)); - pathBuild->add(VariablePropertyExpression::make(pool, "", edgeInfos[i].alias)); + pathBuild->add(InputPropertyExpression::make(pool, nodeInfos[i].alias)); + pathBuild->add(InputPropertyExpression::make(pool, edgeInfos[i].alias)); } - 
pathBuild->add(VariablePropertyExpression::make(pool, "", nodeInfos.back().alias)); + pathBuild->add(InputPropertyExpression::make(pool, nodeInfos.back().alias)); matchClauseCtx.pathBuild = std::move(pathBuild); return Status::OK(); } @@ -182,13 +182,13 @@ Status MatchValidator::buildNodeInfo(const MatchPath *path, } Expression *filter = nullptr; if (props != nullptr) { - auto result = makeSubFilter(alias, props); + auto result = makeNodeSubFilter(props, "*"); NG_RETURN_IF_ERROR(result); filter = result.value(); } else if (node->labels() != nullptr && !node->labels()->labels().empty()) { const auto &labels = node->labels()->labels(); for (const auto &label : labels) { - auto result = makeSubFilter(alias, label->props(), *label->label()); + auto result = makeNodeSubFilter(label->props(), *label->label()); NG_RETURN_IF_ERROR(result); filter = andConnect(pool, filter, result.value()); } @@ -250,7 +250,7 @@ Status MatchValidator::buildEdgeInfo(const MatchPath *path, } Expression *filter = nullptr; if (props != nullptr) { - auto result = makeSubFilter(alias, props); + auto result = makeEdgeSubFilter(props); NG_RETURN_IF_ERROR(result); filter = result.value(); } @@ -424,14 +424,9 @@ Status MatchValidator::validateStepRange(const MatchStepRange *range) const { return Status::SemanticError( "Max hop must be greater equal than min hop: %ld vs. 
%ld", max, min); } - if (max == std::numeric_limits::max()) { + if (max == std::numeric_limits::max()) { return Status::SemanticError("Cannot set maximum hop for variable length relationships"); } - if (min < 0) { - return Status::SemanticError( - "Cannot set negative steps minumum hop for variable length " - "relationships"); - } return Status::OK(); } @@ -526,16 +521,40 @@ Status MatchValidator::validateUnwind(const UnwindClause *unwindClause, return Status::OK(); } -StatusOr MatchValidator::makeSubFilter(const std::string &alias, - const MapExpression *map, - const std::string &label) const { +StatusOr MatchValidator::makeEdgeSubFilter(const MapExpression *map) const { + auto *pool = qctx_->objPool(); + DCHECK(map != nullptr); + auto &items = map->items(); + DCHECK(!items.empty()); + + if (!ExpressionUtils::isEvaluableExpr(items[0].second)) { + return Status::SemanticError("Props must be constant: `%s'", + items[0].second->toString().c_str()); + } + Expression *root = RelationalExpression::makeEQ( + pool, EdgePropertyExpression::make(pool, "*", items[0].first), items[0].second->clone()); + for (auto i = 1u; i < items.size(); i++) { + if (!ExpressionUtils::isEvaluableExpr(items[i].second)) { + return Status::SemanticError("Props must be constant: `%s'", + items[i].second->toString().c_str()); + } + auto *left = root; + auto *right = RelationalExpression::makeEQ( + pool, EdgePropertyExpression::make(pool, "*", items[i].first), items[i].second->clone()); + root = LogicalExpression::makeAnd(pool, left, right); + } + return root; +} + +StatusOr MatchValidator::makeNodeSubFilter(const MapExpression *map, + const std::string &label) const { auto *pool = qctx_->objPool(); // Node has tag without property if (!label.empty() && map == nullptr) { auto *left = ConstantExpression::make(pool, label); auto *args = ArgumentList::make(pool); - args->addArgument(LabelExpression::make(pool, alias)); + args->addArgument(VertexExpression::make(pool)); auto *right = 
FunctionCallExpression::make(pool, "tags", args); Expression *root = RelationalExpression::makeIn(pool, left, right); @@ -546,28 +565,20 @@ StatusOr MatchValidator::makeSubFilter(const std::string &alias, auto &items = map->items(); DCHECK(!items.empty()); - // TODO(dutor) Check if evaluable and evaluate - if (items[0].second->kind() != Expression::Kind::kConstant) { + if (!ExpressionUtils::isEvaluableExpr(items[0].second)) { return Status::SemanticError("Props must be constant: `%s'", items[0].second->toString().c_str()); } Expression *root = RelationalExpression::makeEQ( - pool, - LabelAttributeExpression::make( - pool, LabelExpression::make(pool, alias), ConstantExpression::make(pool, items[0].first)), - items[0].second->clone()); + pool, TagPropertyExpression::make(pool, label, items[0].first), items[0].second->clone()); for (auto i = 1u; i < items.size(); i++) { - if (items[i].second->kind() != Expression::Kind::kConstant) { + if (!ExpressionUtils::isEvaluableExpr(items[i].second)) { return Status::SemanticError("Props must be constant: `%s'", items[i].second->toString().c_str()); } auto *left = root; auto *right = RelationalExpression::makeEQ( - pool, - LabelAttributeExpression::make(pool, - LabelExpression::make(pool, alias), - ConstantExpression::make(pool, items[i].first)), - items[i].second->clone()); + pool, TagPropertyExpression::make(pool, label, items[i].first), items[i].second->clone()); root = LogicalExpression::makeAnd(pool, left, right); } return root; diff --git a/src/graph/validator/MatchValidator.h b/src/graph/validator/MatchValidator.h index 10e69a1450d..14259adc8d5 100644 --- a/src/graph/validator/MatchValidator.h +++ b/src/graph/validator/MatchValidator.h @@ -59,10 +59,6 @@ class MatchValidator final : public Validator { Status includeExisting(const CypherClauseContextBase *cypherClauseCtx, YieldColumns *columns) const; - StatusOr makeSubFilter(const std::string &alias, - const MapExpression *map, - const std::string &label = "") const; - 
static Expression *andConnect(ObjectPool *pool, Expression *left, Expression *right); template @@ -93,6 +89,11 @@ class MatchValidator final : public Validator { Status buildOutputs(const YieldColumns *yields); + StatusOr makeEdgeSubFilter(const MapExpression *map) const; + + StatusOr makeNodeSubFilter(const MapExpression *map, + const std::string &label) const; + private: std::unique_ptr matchCtx_; }; diff --git a/src/graph/validator/test/MatchValidatorTest.cpp b/src/graph/validator/test/MatchValidatorTest.cpp index 4e234d7fd9e..481b09fc7fc 100644 --- a/src/graph/validator/test/MatchValidatorTest.cpp +++ b/src/graph/validator/test/MatchValidatorTest.cpp @@ -16,14 +16,8 @@ TEST_F(MatchValidatorTest, SeekByTagIndex) { { std::string query = "MATCH (v:person) RETURN id(v) AS id;"; std::vector expected = {PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - // TODO this tag filter could remove in this case - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -32,14 +26,8 @@ TEST_F(MatchValidatorTest, SeekByTagIndex) { { std::string query = "MATCH (v:book) RETURN id(v) AS id;"; std::vector expected = {PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - // TODO this tag filter could remove in this case - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -48,20 +36,9 @@ TEST_F(MatchValidatorTest, SeekByTagIndex) { { std::string query = "MATCH (p:person)-[:like]->(b:book) RETURN b.name AS book;"; std::vector expected = {PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - 
PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -78,18 +55,10 @@ TEST_F(MatchValidatorTest, SeekByEdgeIndex) { { std::string query = "MATCH (v1)-[:like]->(v2) RETURN id(v1), id(v2);"; std::vector expected = {PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -108,13 +77,8 @@ TEST_F(MatchValidatorTest, groupby) { "avg(distinct n.age) AS age," "labels(n) AS lb;"; std::vector expected = {PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -131,13 +95,8 @@ TEST_F(MatchValidatorTest, groupby) { "labels(n) AS lb;"; std::vector expected = {PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - 
PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -157,13 +116,8 @@ TEST_F(MatchValidatorTest, groupby) { PlanNode::Kind::kSort, PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -185,13 +139,8 @@ TEST_F(MatchValidatorTest, groupby) { PlanNode::Kind::kSort, PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -207,19 +156,9 @@ TEST_F(MatchValidatorTest, groupby) { "avg(distinct n.age) AS age," "labels(m) AS lb;"; std::vector expected = {PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -236,19 +175,9 @@ TEST_F(MatchValidatorTest, groupby) { "labels(m) AS lb;"; std::vector expected = {PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - 
PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -266,19 +195,9 @@ TEST_F(MatchValidatorTest, groupby) { "labels(m) AS lb "; std::vector expected = {PlanNode::Kind::kAggregate, PlanNode::Kind::kFilter, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -299,19 +218,9 @@ TEST_F(MatchValidatorTest, groupby) { PlanNode::Kind::kLimit, PlanNode::Kind::kAggregate, PlanNode::Kind::kFilter, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -331,19 +240,9 @@ TEST_F(MatchValidatorTest, groupby) { PlanNode::Kind::kDedup, PlanNode::Kind::kAggregate, PlanNode::Kind::kFilter, - 
PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); @@ -360,14 +259,16 @@ TEST_F(MatchValidatorTest, groupby) { "avg(distinct n.age)+1 AS age," "labels(m) AS lb " "SKIP 10 LIMIT 20;"; - std::vector expected = { - PlanNode::Kind::kDataCollect, PlanNode::Kind::kLimit, PlanNode::Kind::kProject, - PlanNode::Kind::kAggregate, PlanNode::Kind::kFilter, PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, PlanNode::Kind::kInnerJoin, PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, PlanNode::Kind::kDedup, PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, PlanNode::Kind::kProject, PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, PlanNode::Kind::kDedup, PlanNode::Kind::kProject, - PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; + std::vector expected = {PlanNode::Kind::kDataCollect, + PlanNode::Kind::kLimit, + PlanNode::Kind::kProject, + PlanNode::Kind::kAggregate, + PlanNode::Kind::kFilter, + PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, + PlanNode::Kind::kIndexScan, + PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); } { @@ -383,15 +284,18 @@ TEST_F(MatchValidatorTest, groupby) { "labels(m) AS lb " "ORDER BY id " "SKIP 10 LIMIT 20;"; - std::vector expected = { - PlanNode::Kind::kDataCollect, PlanNode::Kind::kLimit, PlanNode::Kind::kSort, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, PlanNode::Kind::kFilter, PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, 
PlanNode::Kind::kProject, PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, PlanNode::Kind::kFilter, PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kIndexScan, - PlanNode::Kind::kStart}; + std::vector expected = {PlanNode::Kind::kDataCollect, + PlanNode::Kind::kLimit, + PlanNode::Kind::kSort, + PlanNode::Kind::kDedup, + PlanNode::Kind::kProject, + PlanNode::Kind::kAggregate, + PlanNode::Kind::kFilter, + PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, + PlanNode::Kind::kIndexScan, + PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); } { @@ -407,14 +311,17 @@ TEST_F(MatchValidatorTest, groupby) { "labels(m) AS lb " "ORDER BY id " "SKIP 10 LIMIT 20;"; - std::vector expected = { - PlanNode::Kind::kDataCollect, PlanNode::Kind::kLimit, PlanNode::Kind::kSort, - PlanNode::Kind::kDedup, PlanNode::Kind::kAggregate, PlanNode::Kind::kFilter, - PlanNode::Kind::kFilter, PlanNode::Kind::kProject, PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, PlanNode::Kind::kGetVertices, PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, PlanNode::Kind::kFilter, PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, PlanNode::Kind::kGetNeighbors, PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; + std::vector expected = {PlanNode::Kind::kDataCollect, + PlanNode::Kind::kLimit, + PlanNode::Kind::kSort, + PlanNode::Kind::kDedup, + PlanNode::Kind::kAggregate, + PlanNode::Kind::kFilter, + PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, + PlanNode::Kind::kIndexScan, + PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); } { @@ -430,17 +337,19 @@ TEST_F(MatchValidatorTest, groupby) { "labels(m) AS lb " "ORDER BY id " "SKIP 10 LIMIT 20;"; - std::vector expected = { - 
PlanNode::Kind::kDataCollect, PlanNode::Kind::kLimit, PlanNode::Kind::kSort, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, PlanNode::Kind::kFilter, PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, PlanNode::Kind::kProject, PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kFilter, PlanNode::Kind::kProject, PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, PlanNode::Kind::kFilter, PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, PlanNode::Kind::kIndexScan, - PlanNode::Kind::kStart}; + std::vector expected = {PlanNode::Kind::kDataCollect, + PlanNode::Kind::kLimit, + PlanNode::Kind::kSort, + PlanNode::Kind::kDedup, + PlanNode::Kind::kProject, + PlanNode::Kind::kAggregate, + PlanNode::Kind::kFilter, + PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, + PlanNode::Kind::kTraverse, + PlanNode::Kind::kIndexScan, + PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); } } @@ -454,19 +363,9 @@ TEST_F(MatchValidatorTest, with) { std::vector expected = {PlanNode::Kind::kProject, PlanNode::Kind::kProject, PlanNode::Kind::kAggregate, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kInnerJoin, - PlanNode::Kind::kProject, - PlanNode::Kind::kGetVertices, - PlanNode::Kind::kDedup, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kProject, - PlanNode::Kind::kFilter, - PlanNode::Kind::kGetNeighbors, - PlanNode::Kind::kDedup, PlanNode::Kind::kProject, + PlanNode::Kind::kAppendVertices, + PlanNode::Kind::kTraverse, PlanNode::Kind::kIndexScan, PlanNode::Kind::kStart}; EXPECT_TRUE(checkResult(query, expected)); diff --git a/src/graph/validator/test/QueryValidatorTest.cpp b/src/graph/validator/test/QueryValidatorTest.cpp index 
f50c22c27c6..eaa3c449a1c 100644 --- a/src/graph/validator/test/QueryValidatorTest.cpp +++ b/src/graph/validator/test/QueryValidatorTest.cpp @@ -1177,19 +1177,9 @@ TEST_F(QueryValidatorTest, TestMatch) { "RETURN type(r) AS Type, v2.name AS Name"; std::vector expected = { PK::kProject, - PK::kFilter, - PK::kProject, - PK::kInnerJoin, - PK::kProject, - PK::kGetVertices, - PK::kDedup, - PK::kProject, - PK::kFilter, - PK::kProject, - PK::kFilter, - PK::kGetNeighbors, - PK::kDedup, PK::kProject, + PK::kAppendVertices, + PK::kTraverse, PK::kIndexScan, PK::kStart, }; @@ -1200,11 +1190,15 @@ TEST_F(QueryValidatorTest, TestMatch) { "MATCH (:person{name:'Dwyane Wade'}) -[:like]-> () -[:like]-> (v3) " "RETURN DISTINCT v3.name AS Name"; std::vector expected = { - PK::kDataCollect, PK::kDedup, PK::kProject, PK::kFilter, PK::kProject, - PK::kInnerJoin, PK::kProject, PK::kGetVertices, PK::kDedup, PK::kProject, - PK::kInnerJoin, PK::kFilter, PK::kProject, PK::kGetNeighbors, PK::kDedup, - PK::kProject, PK::kFilter, PK::kProject, PK::kFilter, PK::kGetNeighbors, - PK::kDedup, PK::kProject, PK::kIndexScan, PK::kStart, + PK::kDataCollect, + PK::kDedup, + PK::kProject, + PK::kProject, + PK::kAppendVertices, + PK::kTraverse, + PK::kTraverse, + PK::kIndexScan, + PK::kStart, }; EXPECT_TRUE(checkResult(query, expected)); } @@ -1216,18 +1210,10 @@ TEST_F(QueryValidatorTest, TestMatch) { std::vector expected = { PK::kProject, PK::kFilter, - PK::kFilter, PK::kProject, - PK::kInnerJoin, - PK::kProject, - PK::kGetVertices, + PK::kAppendVertices, + PK::kTraverse, PK::kDedup, - PK::kProject, - PK::kFilter, - PK::kProject, - PK::kGetNeighbors, - PK::kDedup, - PK::kProject, PK::kPassThrough, PK::kStart, }; @@ -1238,53 +1224,25 @@ TEST_F(QueryValidatorTest, TestMatch) { "MATCH (v1)-[e:serve*2..3{start_year: 2000}]-(v2) " "WHERE id(v1) == \"LeBron James\"" "RETURN v1, v2"; - std::vector expected = {PK::kProject, - PK::kFilter, - PK::kFilter, - PK::kProject, - PK::kInnerJoin, - PK::kProject, - 
PK::kGetVertices, - PK::kDedup, - PK::kProject, - PK::kFilter, - PK::kUnionAllVersionVar, - PK::kLoop, - PK::kProject, - PK::kFilter, - PK::kFilter, - PK::kProject, - PK::kGetNeighbors, - PK::kInnerJoin, - PK::kDedup, - PK::kProject, - PK::kProject, - PK::kFilter, - PK::kPassThrough, - PK::kGetNeighbors, - PK::kStart, - PK::kDedup, - PK::kProject, - PK::kStart}; - EXPECT_TRUE(checkResult(query, expected)); - } - { - std::string query = "MATCH p = (n)-[]-(m:person{name:\"LeBron James\"}) RETURN p"; std::vector expected = { PK::kProject, PK::kFilter, PK::kProject, - PK::kInnerJoin, - PK::kProject, - PK::kGetVertices, + PK::kAppendVertices, + PK::kTraverse, PK::kDedup, + PK::kPassThrough, + PK::kStart, + }; + EXPECT_TRUE(checkResult(query, expected)); + } + { + std::string query = "MATCH p = (n)-[]-(m:person{name:\"LeBron James\"}) RETURN p"; + std::vector expected = { PK::kProject, - PK::kFilter, - PK::kProject, - PK::kFilter, - PK::kGetNeighbors, - PK::kDedup, PK::kProject, + PK::kAppendVertices, + PK::kTraverse, PK::kIndexScan, PK::kStart, }; diff --git a/src/parser/MatchSentence.cpp b/src/parser/MatchSentence.cpp index a0fe1de9a35..a6f06fe3c93 100644 --- a/src/parser/MatchSentence.cpp +++ b/src/parser/MatchSentence.cpp @@ -7,6 +7,10 @@ namespace nebula { +std::string MatchStepRange::toString() const { + return folly::stringPrintf("%lu..%lu", min(), max()); +} + std::string MatchClause::toString() const { std::string buf; buf.reserve(256); @@ -107,7 +111,7 @@ std::string MatchEdge::toString() const { buf += "*"; if (range_->min() == range_->max()) { buf += folly::to(range_->min()); - } else if (range_->max() == std::numeric_limits::max()) { + } else if (range_->max() == std::numeric_limits::max()) { if (range_->min() != 1) { buf += folly::to(range_->min()); buf += ".."; diff --git a/src/parser/MatchSentence.h b/src/parser/MatchSentence.h index 923dda6f31d..4390650b164 100644 --- a/src/parser/MatchSentence.h +++ b/src/parser/MatchSentence.h @@ -28,7 +28,7 @@ class 
MatchEdgeTypeList final { class MatchStepRange final { public: - explicit MatchStepRange(int64_t min, int64_t max = std::numeric_limits::max()) { + explicit MatchStepRange(size_t min = 0, size_t max = std::numeric_limits::max()) { min_ = min; max_ = max; } @@ -37,9 +37,11 @@ class MatchStepRange final { auto max() const { return max_; } + std::string toString() const; + private: - int64_t min_{1}; - int64_t max_{1}; + size_t min_{1}; + size_t max_{1}; }; class MatchEdgeProp final { diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 94d3f20c15d..9322db81431 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -1687,16 +1687,36 @@ match_step_range $$ = new MatchStepRange(1); } | STAR legal_integer { - $$ = new MatchStepRange($2, $2); + if ($2 < 0) { + throw nebula::GraphParser::syntax_error(@2, "Expected an unsigned integer."); + } + auto step = static_cast($2); + $$ = new MatchStepRange(step, step); } | STAR DOT_DOT legal_integer { - $$ = new MatchStepRange(1, $3); + if ($3 < 0) { + throw nebula::GraphParser::syntax_error(@3, "Expected an unsigned integer."); + } + auto step = static_cast($3); + $$ = new MatchStepRange(1, step); } | STAR legal_integer DOT_DOT { - $$ = new MatchStepRange($2); + if ($2 < 0) { + throw nebula::GraphParser::syntax_error(@2, "Expected an unsigned integer."); + } + auto step = static_cast($2); + $$ = new MatchStepRange(step); } | STAR legal_integer DOT_DOT legal_integer { - $$ = new MatchStepRange($2, $4); + if ($2 < 0) { + throw nebula::GraphParser::syntax_error(@2, "Expected an unsigned integer."); + } + auto min = static_cast($2); + if ($4 < 0) { + throw nebula::GraphParser::syntax_error(@4, "Expected an unsigned integer."); + } + auto max = static_cast($4); + $$ = new MatchStepRange(min, max); } ; diff --git a/tests/tck/features/expression/RelationalExpr.feature b/tests/tck/features/expression/RelationalExpr.feature index 5bae6fe14c8..f1c0f64e33a 100644 --- a/tests/tck/features/expression/RelationalExpr.feature 
+++ b/tests/tck/features/expression/RelationalExpr.feature @@ -227,12 +227,9 @@ Feature: RelationalExpression | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 13 | | - | 13 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 13 | | - | 15 | GetVertices | 11 | | - | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE"}}} | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | Project | 8 | | + | 8 | Filter | 2 | | + | 2 | AppendVertices | 6 | | + | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE"}}} | + | 0 | Start | | | diff --git a/tests/tck/features/expression/UnaryExpr.feature b/tests/tck/features/expression/UnaryExpr.feature index b117cfca3fb..8ef2ad970bf 100644 --- a/tests/tck/features/expression/UnaryExpr.feature +++ b/tests/tck/features/expression/UnaryExpr.feature @@ -95,12 +95,9 @@ Feature: UnaryExpression | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 12 | | - | 12 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 14 | | - | 14 | GetVertices | 11 | | - | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE"}}} | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | Project | 8 | | + | 8 | Filter | 2 | | + | 2 | AppendVertices | 6 | | + | 6 | IndexScan | 0 | | + | 0 | Start | | | diff --git a/tests/tck/features/match/With.feature b/tests/tck/features/match/With.feature index f7a013e6af3..0cccf1132a3 100644 --- a/tests/tck/features/match/With.feature +++ b/tests/tck/features/match/With.feature @@ -126,18 
+126,15 @@ Feature: With clause | ("Carmelo Anthony" :player{age: 34, name: "Carmelo Anthony"}) | 34 | | ("LeBron James" :player{age: 34, name: "LeBron James"}) | 34 | And the execution plan should be: - | id | name | dependencies | operator info | - | 13 | Project | 12 | | - | 12 | Filter | 17 | {"isStable": "true"} | - | 17 | TopN | 9 | | - | 9 | Project | 8 | | - | 8 | Filter | 7 | {"isStable": "false"} | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 16 | {"isStable": "false"} | - | 16 | GetVertices | 1 | | - | 1 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 8 | Project | 7 | | + | 7 | Filter | 11 | | + | 11 | TopN | 4 | | + | 4 | Project | 3 | | + | 3 | Project | 2 | | + | 2 | AppendVertices | 1 | | + | 1 | IndexScan | 0 | | + | 0 | Start | | | When executing query: """ MATCH (v:player)-[:like]->(v2) diff --git a/tests/tck/features/optimizer/CollapseProjectRule.feature b/tests/tck/features/optimizer/CollapseProjectRule.feature index ded6117e92b..22712c9cb02 100644 --- a/tests/tck/features/optimizer/CollapseProjectRule.feature +++ b/tests/tck/features/optimizer/CollapseProjectRule.feature @@ -28,15 +28,12 @@ Feature: Collapse Project Rule | 4 | | 3 | And the execution plan should be: - | id | name | dependencies | operator info | - | 16 | Project | 14 | | - | 14 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 18 | | - | 18 | GetVertices | 12 | | - | 12 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 12 | Project | 10 | | + | 10 | Filter | 2 | | + | 2 | AppendVertices | 7 | | + | 7 | IndexScan | 0 | | + | 0 | Start | | | When profiling query: """ LOOKUP ON player diff --git a/tests/tck/features/optimizer/CombineFilterRule.feature b/tests/tck/features/optimizer/CombineFilterRule.feature index 1a8f7b807b4..08392f5c30f 100644 --- a/tests/tck/features/optimizer/CombineFilterRule.feature +++ 
b/tests/tck/features/optimizer/CombineFilterRule.feature @@ -19,16 +19,10 @@ Feature: combine filters | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | ("Jason Kidd" :player{age: 45, name: "Jason Kidd"}) | | ("Jason Kidd" :player{age: 45, name: "Jason Kidd"}) | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 16 | Project | 18 | | - | 18 | Filter | 13 | {"condition": "(($v.age>40) AND ($n.age>42) AND !(hasSameEdgeInPath($-.__COL_0)))"} | - | 13 | Project | 12 | | - | 12 | InnerJoin | 11 | | - | 11 | Project | 20 | | - | 20 | GetVertices | 7 | | - | 7 | Filter | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 22 | | - | 22 | GetNeighbors | 17 | | - | 17 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 10 | Project | 9 | | + | 9 | Filter | 3 | | + | 3 | AppendVertices | 2 | | + | 2 | Traverse | 7 | | + | 7 | IndexScan | 0 | | + | 0 | Start | | | diff --git a/tests/tck/features/optimizer/IndexScanRule.feature b/tests/tck/features/optimizer/IndexScanRule.feature index d44c5701d7e..8f8e5be6d84 100644 --- a/tests/tck/features/optimizer/IndexScanRule.feature +++ b/tests/tck/features/optimizer/IndexScanRule.feature @@ -20,15 +20,12 @@ Feature: Match index selection | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 13 | | - | 13 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 15 | | - | 15 | GetVertices | 11 | | - | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"name","beginValue":"\"Tim Duncan\"","endValue":"\"Yao Ming\"","includeBegin":"false","includeEnd":"true"}}} | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | Project | 8 | | + | 8 | Filter | 2 | | + | 2 | AppendVertices | 
6 | | + | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"name","beginValue":"\"Tim Duncan\"","endValue":"\"Yao Ming\"","includeBegin":"false","includeEnd":"true"}}} | + | 0 | Start | | | When profiling query: """ MATCH (v:player) @@ -62,15 +59,12 @@ Feature: Match index selection | ("JaVale McGee" :player{age: 31, name: "JaVale McGee"}) | | ("Dwight Howard" :player{age: 33, name: "Dwight Howard"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 13 | | - | 13 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 15 | | - | 15 | GetVertices | 11 | | - | 11 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"age","beginValue":"30","endValue":"40","includeBegin":"false","includeEnd":"true"}}} | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | Project | 8 | | + | 8 | Filter | 2 | | + | 2 | AppendVertices | 6 | | + | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"age","beginValue":"30","endValue":"40","includeBegin":"false","includeEnd":"true"}}} | + | 0 | Start | | | Scenario: or filter embeding When profiling query: @@ -97,15 +91,11 @@ Feature: Match index selection | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 10 | Project | 13 | | - | 13 | Filter | 7 | {"condition":"!(hasSameEdgeInPath($-.__COL_0))"} | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 15 | | - | 15 | GetVertices | 11 | | - | 11 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 6 | Project | 2 | | + | 2 | AppendVertices | 5 | | + | 5 | IndexScan | 0 | | + | 0 | Start | | | Scenario: degenerate to full tag scan When profiling query: @@ -123,19 
+113,13 @@ Feature: Match index selection | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | | ("Aron Baynes" :player{age: 32, name: "Aron Baynes"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | And the execution plan should be: - | id | name | dependencies | operator info | - | 16 | Project | 19 | | - | 19 | Filter | 13 | { "condition": "((($v.name<=\"Aron Baynes\") OR ($n.age>45)) AND !(hasSameEdgeInPath($-.__COL_0)))"} | - | 13 | Project | 12 | | - | 12 | InnerJoin | 11 | | - | 11 | Project | 21 | | - | 21 | GetVertices | 7 | | - | 7 | Filter | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 23 | | - | 23 | GetNeighbors | 17 | | - | 17 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | Project | 8 | | + | 8 | Filter | 3 | | + | 3 | AppendVertices | 2 | | + | 2 | Traverse | 1 | | + | 1 | IndexScan | 0 | | + | 0 | Start | | | # This is actually the optimization for another optRule, # but it is necessary to ensure that the current optimization does not destroy this scenario # and it can be considered in the subsequent refactoring @@ -154,16 +138,11 @@ Feature: Match index selection | count | | 81 | And the execution plan should be: - | id | name | dependencies | operator info | - | 16 | Aggregate | 18 | | - | 18 | Filter | 13 | | - | 13 | Project | 12 | | - | 12 | InnerJoin | 11 | | - | 11 | Project | 20 | | - | 20 | GetVertices | 7 | | - | 7 | Filter | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 22 | | - | 22 | GetNeighbors | 17 | | - | 17 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 6 | Aggregate | 8 | | + | 8 | Project | 7 | | + | 7 | Filter | 3 | | + | 3 | AppendVertices | 2 | | + | 2 | Traverse | 1 | | + | 1 | IndexScan | 0 | | + | 0 | Start | | | diff --git a/tests/tck/features/optimizer/MergeGetNbrsDedupProjectRule.feature 
b/tests/tck/features/optimizer/MergeGetNbrsDedupProjectRule.feature index 9a2d37097f0..7eec224d430 100644 --- a/tests/tck/features/optimizer/MergeGetNbrsDedupProjectRule.feature +++ b/tests/tck/features/optimizer/MergeGetNbrsDedupProjectRule.feature @@ -1,6 +1,8 @@ # Copyright (c) 2021 vesoft inc. All rights reserved. # # This source code is licensed under Apache 2.0 License. +# This optimization rule is not neccessary now +@skip Feature: merge get neighbors, dedup and project rule Background: diff --git a/tests/tck/features/optimizer/MergeGetVerticesDedupProjectRule.feature b/tests/tck/features/optimizer/MergeGetVerticesDedupProjectRule.feature index a514e9009ea..02beb81b619 100644 --- a/tests/tck/features/optimizer/MergeGetVerticesDedupProjectRule.feature +++ b/tests/tck/features/optimizer/MergeGetVerticesDedupProjectRule.feature @@ -1,6 +1,8 @@ # Copyright (c) 2021 vesoft inc. All rights reserved. # # This source code is licensed under Apache 2.0 License. +# This optimization rule is not neccesarry now. 
+@skip Feature: merge get vertices, dedup and project rule Background: diff --git a/tests/tck/features/optimizer/PushLimitDownProjectRule.feature b/tests/tck/features/optimizer/PushLimitDownProjectRule.feature index b0bc1bf4f23..4ab978c904d 100644 --- a/tests/tck/features/optimizer/PushLimitDownProjectRule.feature +++ b/tests/tck/features/optimizer/PushLimitDownProjectRule.feature @@ -22,18 +22,13 @@ Feature: Push Limit down project rule | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | And the execution plan should be: - | id | name | dependencies | operator info | - | 18 | DataCollect | 26 | | - | 26 | Project | 25 | | - | 25 | Limit | 20 | | - | 20 | Filter | 13 | | - | 13 | Project | 12 | | - | 12 | InnerJoin | 11 | | - | 11 | Project | 22 | | - | 22 | GetVertices | 7 | | - | 7 | Filter | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 24 | | - | 24 | GetNeighbors | 1 | | - | 1 | PassThrough | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 9 | DataCollect | 19 | | + | 19 | Project | 16 | | + | 16 | Limit | 11 | | + | 11 | Filter | 4 | | + | 4 | AppendVertices | 3 | | + | 3 | Traverse | 2 | | + | 2 | Dedup | 1 | | + | 1 | PassThrough | 0 | | + | 0 | Start | | | diff --git a/tests/tck/features/optimizer/RemoveUselessProjectRule.feature b/tests/tck/features/optimizer/RemoveUselessProjectRule.feature index b25f2e831c7..894c472ac73 100644 --- a/tests/tck/features/optimizer/RemoveUselessProjectRule.feature +++ b/tests/tck/features/optimizer/RemoveUselessProjectRule.feature @@ -49,17 +49,14 @@ Feature: Remove Useless Project Rule | 47 | 1 | | 48 | 1 | And the execution plan should be: - | id | name | 
dependencies | operator info | - | 12 | DataCollect | 11 | | - | 11 | Sort | 14 | | - | 14 | Aggregate | 8 | | - | 8 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 16 | | - | 16 | GetVertices | 13 | | - | 13 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 7 | DataCollect | 6 | | + | 6 | Sort | 8 | | + | 8 | Aggregate | 3 | | + | 3 | Project | 2 | | + | 2 | AppendVertices | 1 | | + | 1 | IndexScan | 0 | | + | 0 | Start | | | When profiling query: """ MATCH p = (n:player{name:"Tony Parker"}) @@ -69,11 +66,8 @@ Feature: Remove Useless Project Rule | n | p | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | <("Tony Parker" :player{age: 36, name: "Tony Parker"})> | And the execution plan should be: - | id | name | dependencies | operator info | - | 11 | Filter | 7 | | - | 7 | Project | 6 | | - | 6 | Project | 5 | | - | 5 | Filter | 13 | | - | 13 | GetVertices | 10 | | - | 10 | IndexScan | 0 | | - | 0 | Start | | | + | id | name | dependencies | operator info | + | 6 | Project | 2 | | + | 2 | AppendVertices | 5 | | + | 5 | IndexScan | 0 | | + | 0 | Start | | | From 07d5e21304a87bac7f0bad78f0f2ab4b8d32057f Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Wed, 17 Nov 2021 17:59:47 +0800 Subject: [PATCH 22/53] replace kVertex with kTag (#3286) * replace kVertex with kTag * fixconflict --- src/common/utils/IndexKeyUtils.h | 2 +- src/common/utils/NebulaKeyUtils.cpp | 26 +++++----- src/common/utils/NebulaKeyUtils.h | 27 +++++----- src/common/utils/Types.h | 7 +-- src/common/utils/test/NebulaKeyUtilsTest.cpp | 38 +++++++------- src/kvstore/Part.cpp | 2 +- .../plugins/elasticsearch/ESListener.cpp | 2 +- src/kvstore/plugins/hbase/HBaseStore.cpp | 2 +- .../plugins/hbase/test/HBaseStoreTest.cpp | 2 +- src/kvstore/test/NebulaListenerTest.cpp | 4 +- src/kvstore/test/NebulaStoreTest.cpp | 4 +- src/kvstore/test/PartTest.cpp | 16 +++--- 
src/kvstore/test/RocksEngineTest.cpp | 50 +++++++++---------- src/storage/CompactionFilter.h | 2 +- src/storage/admin/RebuildTagIndexTask.cpp | 2 +- src/storage/admin/StatsTask.cpp | 2 +- src/storage/exec/IndexVertexScanNode.cpp | 8 +-- src/storage/exec/ScanNode.h | 2 +- src/storage/exec/TagNode.h | 2 +- src/storage/exec/UpdateNode.h | 2 +- src/storage/mutate/AddVerticesProcessor.cpp | 8 +-- src/storage/mutate/DeleteTagsProcessor.cpp | 4 +- .../mutate/DeleteVerticesProcessor.cpp | 4 +- .../AddAndUpdateVertexAndEdgeBenchmark.cpp | 2 +- src/storage/test/CompactionTest.cpp | 2 +- src/storage/test/GetNeighborsBenchmark.cpp | 2 +- src/storage/test/GetPropTest.cpp | 2 +- src/storage/test/IndexScanLimitTest.cpp | 4 +- src/storage/test/IndexTest.cpp | 2 +- src/storage/test/IndexWithTTLTest.cpp | 16 +++--- src/storage/test/IndexWriteTest.cpp | 10 ++-- src/storage/test/LookupIndexTest.cpp | 4 +- src/storage/test/MemoryLockBenchmark.cpp | 2 +- .../test/PrefixBloomFilterBenchmark.cpp | 4 +- src/storage/test/QueryStatsTest.cpp | 2 +- src/storage/test/QueryTestUtils.h | 2 +- src/storage/test/RebuildIndexTest.cpp | 2 +- src/storage/test/StatsTaskTest.cpp | 2 +- .../test/StorageIndexWriteBenchmark.cpp | 2 +- src/storage/test/TestUtils.h | 4 +- src/storage/test/UpdateVertexTest.cpp | 28 +++++------ src/tools/db-dump/DbDumper.cpp | 20 ++++---- src/tools/db-upgrade/DbUpgrader.cpp | 4 +- src/tools/db-upgrade/NebulaKeyUtilsV2.cpp | 2 +- src/tools/db-upgrade/NebulaKeyUtilsV2.h | 4 +- 45 files changed, 169 insertions(+), 171 deletions(-) diff --git a/src/common/utils/IndexKeyUtils.h b/src/common/utils/IndexKeyUtils.h index 1e5f6349509..c8bdd4fb967 100644 --- a/src/common/utils/IndexKeyUtils.h +++ b/src/common/utils/IndexKeyUtils.h @@ -457,7 +457,7 @@ class IndexKeyUtils final { } static VertexIDSlice getIndexVertexID(size_t vIdLen, const folly::StringPiece& rawKey) { - CHECK_GE(rawKey.size(), kVertexIndexLen + vIdLen); + CHECK_GE(rawKey.size(), kTagIndexLen + vIdLen); auto offset = 
rawKey.size() - vIdLen; return rawKey.subpiece(offset, vIdLen); } diff --git a/src/common/utils/NebulaKeyUtils.cpp b/src/common/utils/NebulaKeyUtils.cpp index 6445d8e88dd..9e7cc2585f7 100644 --- a/src/common/utils/NebulaKeyUtils.cpp +++ b/src/common/utils/NebulaKeyUtils.cpp @@ -34,13 +34,13 @@ std::string NebulaKeyUtils::lastKey(const std::string& prefix, size_t count) { } // static -std::string NebulaKeyUtils::vertexKey( +std::string NebulaKeyUtils::tagKey( size_t vIdLen, PartitionID partId, const VertexID& vId, TagID tagId, char pad) { CHECK_GE(vIdLen, vId.size()); - int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kVertex); + int32_t item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kTag_); std::string key; - key.reserve(kVertexLen + vIdLen); + key.reserve(kTagLen + vIdLen); key.append(reinterpret_cast(&item), sizeof(int32_t)) .append(vId.data(), vId.size()) .append(vIdLen - vId.size(), pad) @@ -106,12 +106,12 @@ std::string NebulaKeyUtils::kvKey(PartitionID partId, const folly::StringPiece& } // static -std::string NebulaKeyUtils::vertexPrefix(size_t vIdLen, - PartitionID partId, - const VertexID& vId, - TagID tagId) { +std::string NebulaKeyUtils::tagPrefix(size_t vIdLen, + PartitionID partId, + const VertexID& vId, + TagID tagId) { CHECK_GE(vIdLen, vId.size()); - PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kVertex); + PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kTag_); std::string key; key.reserve(sizeof(PartitionID) + vIdLen + sizeof(TagID)); @@ -123,9 +123,9 @@ std::string NebulaKeyUtils::vertexPrefix(size_t vIdLen, } // static -std::string NebulaKeyUtils::vertexPrefix(size_t vIdLen, PartitionID partId, const VertexID& vId) { +std::string NebulaKeyUtils::tagPrefix(size_t vIdLen, PartitionID partId, const VertexID& vId) { CHECK_GE(vIdLen, vId.size()); - PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kVertex); + PartitionID 
item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kTag_); std::string key; key.reserve(sizeof(PartitionID) + vIdLen); key.append(reinterpret_cast(&item), sizeof(PartitionID)) @@ -135,8 +135,8 @@ std::string NebulaKeyUtils::vertexPrefix(size_t vIdLen, PartitionID partId, cons } // static -std::string NebulaKeyUtils::vertexPrefix(PartitionID partId) { - PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kVertex); +std::string NebulaKeyUtils::tagPrefix(PartitionID partId) { + PartitionID item = (partId << kPartitionOffset) | static_cast(NebulaKeyType::kTag_); std::string key; key.reserve(sizeof(PartitionID)); key.append(reinterpret_cast(&item), sizeof(PartitionID)); @@ -210,7 +210,7 @@ std::vector NebulaKeyUtils::snapshotPrefix(PartitionID partId) { if (partId == 0) { result.emplace_back(""); } else { - result.emplace_back(vertexPrefix(partId)); + result.emplace_back(tagPrefix(partId)); result.emplace_back(edgePrefix(partId)); result.emplace_back(IndexKeyUtils::indexPrefix(partId)); // kSystem will be written when balance data diff --git a/src/common/utils/NebulaKeyUtils.h b/src/common/utils/NebulaKeyUtils.h index 5dc5904c2bf..f616e12b0d2 100644 --- a/src/common/utils/NebulaKeyUtils.h +++ b/src/common/utils/NebulaKeyUtils.h @@ -11,7 +11,7 @@ namespace nebula { /** - * VertexKeyUtils: + * TagKeyUtils: * type(1) + partId(3) + vertexId(*) + tagId(4) * * EdgeKeyUtils: @@ -55,7 +55,7 @@ class NebulaKeyUtils final { /** * Generate vertex key for kv store * */ - static std::string vertexKey( + static std::string tagKey( size_t vIdLen, PartitionID partId, const VertexID& vId, TagID tagId, char pad = '\0'); static std::string edgeKey(size_t vIdLen, @@ -75,14 +75,11 @@ class NebulaKeyUtils final { /** * Prefix for vertex * */ - static std::string vertexPrefix(size_t vIdLen, - PartitionID partId, - const VertexID& vId, - TagID tagId); + static std::string tagPrefix(size_t vIdLen, PartitionID partId, const VertexID& vId, TagID tagId); - 
static std::string vertexPrefix(size_t vIdLen, PartitionID partId, const VertexID& vId); + static std::string tagPrefix(size_t vIdLen, PartitionID partId, const VertexID& vId); - static std::string vertexPrefix(PartitionID partId); + static std::string tagPrefix(PartitionID partId); /** * Prefix for edge @@ -111,26 +108,26 @@ class NebulaKeyUtils final { return readInt(rawKey.data(), sizeof(PartitionID)) >> 8; } - static bool isVertex(size_t vIdLen, const folly::StringPiece& rawKey) { - if (rawKey.size() != kVertexLen + vIdLen) { + static bool isTag(size_t vIdLen, const folly::StringPiece& rawKey) { + if (rawKey.size() != kTagLen + vIdLen) { return false; } constexpr int32_t len = static_cast(sizeof(NebulaKeyType)); auto type = readInt(rawKey.data(), len) & kTypeMask; - return static_cast(type) == NebulaKeyType::kVertex; + return static_cast(type) == NebulaKeyType::kTag_; } static VertexIDSlice getVertexId(size_t vIdLen, const folly::StringPiece& rawKey) { - if (rawKey.size() != kVertexLen + vIdLen) { - dumpBadKey(rawKey, kVertexLen + vIdLen, vIdLen); + if (rawKey.size() != kTagLen + vIdLen) { + dumpBadKey(rawKey, kTagLen + vIdLen, vIdLen); } auto offset = sizeof(PartitionID); return rawKey.subpiece(offset, vIdLen); } static TagID getTagId(size_t vIdLen, const folly::StringPiece& rawKey) { - if (rawKey.size() != kVertexLen + vIdLen) { - dumpBadKey(rawKey, kVertexLen + vIdLen, vIdLen); + if (rawKey.size() != kTagLen + vIdLen) { + dumpBadKey(rawKey, kTagLen + vIdLen, vIdLen); } auto offset = sizeof(PartitionID) + vIdLen; return readInt(rawKey.data() + offset, sizeof(TagID)); diff --git a/src/common/utils/Types.h b/src/common/utils/Types.h index 1546cd537ee..8012ac91305 100644 --- a/src/common/utils/Types.h +++ b/src/common/utils/Types.h @@ -12,12 +12,13 @@ namespace nebula { enum class NebulaKeyType : uint32_t { - kVertex = 0x00000001, + kTag_ = 0x00000001, kEdge = 0x00000002, kIndex = 0x00000003, kSystem = 0x00000004, kOperation = 0x00000005, kKeyValue = 0x00000006, 
+ // kVertex = 0x00000007, }; enum class NebulaSystemKeyType : uint32_t { @@ -41,7 +42,7 @@ static typename std::enable_if::value, T>::type readInt(cons } // size of vertex key except vertexId -static constexpr int32_t kVertexLen = sizeof(PartitionID) + sizeof(TagID); +static constexpr int32_t kTagLen = sizeof(PartitionID) + sizeof(TagID); // size of vertex key except srcId and dstId static constexpr int32_t kEdgeLen = @@ -56,7 +57,7 @@ static constexpr uint8_t kPartitionOffset = 8; // See KeyType enum static constexpr uint32_t kTypeMask = 0x000000FF; -static constexpr int32_t kVertexIndexLen = sizeof(PartitionID) + sizeof(IndexID); +static constexpr int32_t kTagIndexLen = sizeof(PartitionID) + sizeof(IndexID); static constexpr int32_t kEdgeIndexLen = sizeof(PartitionID) + sizeof(IndexID) + sizeof(EdgeRanking); diff --git a/src/common/utils/test/NebulaKeyUtilsTest.cpp b/src/common/utils/test/NebulaKeyUtilsTest.cpp index eb5504225b2..206d756daab 100644 --- a/src/common/utils/test/NebulaKeyUtilsTest.cpp +++ b/src/common/utils/test/NebulaKeyUtilsTest.cpp @@ -16,18 +16,18 @@ class KeyUtilsTestBase : public ::testing::Test { ~KeyUtilsTestBase() = default; - void verifyVertex(PartitionID partId, VertexID vId, TagID tagId, size_t actualSize) { - auto vertexKey = NebulaKeyUtils::vertexKey(vIdLen_, partId, vId, tagId); - ASSERT_EQ(vertexKey.size(), kVertexLen + vIdLen_); - ASSERT_EQ(vertexKey.substr(0, sizeof(PartitionID) + vIdLen_ + sizeof(TagID)), - NebulaKeyUtils::vertexPrefix(vIdLen_, partId, vId, tagId)); - ASSERT_EQ(vertexKey.substr(0, sizeof(PartitionID) + vIdLen_), - NebulaKeyUtils::vertexPrefix(vIdLen_, partId, vId)); - ASSERT_TRUE(NebulaKeyUtils::isVertex(vIdLen_, vertexKey)); - ASSERT_FALSE(NebulaKeyUtils::isEdge(vIdLen_, vertexKey)); - ASSERT_EQ(partId, NebulaKeyUtils::getPart(vertexKey)); - ASSERT_EQ(tagId, NebulaKeyUtils::getTagId(vIdLen_, vertexKey)); - ASSERT_EQ(vId, NebulaKeyUtils::getVertexId(vIdLen_, vertexKey).subpiece(0, actualSize)); + void 
verifyTag(PartitionID partId, VertexID vId, TagID tagId, size_t actualSize) { + auto tagKey = NebulaKeyUtils::tagKey(vIdLen_, partId, vId, tagId); + ASSERT_EQ(tagKey.size(), kTagLen + vIdLen_); + ASSERT_EQ(tagKey.substr(0, sizeof(PartitionID) + vIdLen_ + sizeof(TagID)), + NebulaKeyUtils::tagPrefix(vIdLen_, partId, vId, tagId)); + ASSERT_EQ(tagKey.substr(0, sizeof(PartitionID) + vIdLen_), + NebulaKeyUtils::tagPrefix(vIdLen_, partId, vId)); + ASSERT_TRUE(NebulaKeyUtils::isTag(vIdLen_, tagKey)); + ASSERT_FALSE(NebulaKeyUtils::isEdge(vIdLen_, tagKey)); + ASSERT_EQ(partId, NebulaKeyUtils::getPart(tagKey)); + ASSERT_EQ(tagId, NebulaKeyUtils::getTagId(vIdLen_, tagKey)); + ASSERT_EQ(vId, NebulaKeyUtils::getVertexId(vIdLen_, tagKey).subpiece(0, actualSize)); } void verifyEdge(PartitionID partId, @@ -47,7 +47,7 @@ class KeyUtilsTestBase : public ::testing::Test { 0, sizeof(PartitionID) + (vIdLen_ << 1) + sizeof(EdgeType) + sizeof(EdgeRanking)), NebulaKeyUtils::edgePrefix(vIdLen_, partId, srcId, type, rank, dstId)); ASSERT_TRUE(NebulaKeyUtils::isEdge(vIdLen_, edgeKey)); - ASSERT_FALSE(NebulaKeyUtils::isVertex(vIdLen_, edgeKey)); + ASSERT_FALSE(NebulaKeyUtils::isTag(vIdLen_, edgeKey)); ASSERT_EQ(partId, NebulaKeyUtils::getPart(edgeKey)); ASSERT_EQ(srcId, NebulaKeyUtils::getSrcId(vIdLen_, edgeKey).subpiece(0, actualSize)); ASSERT_EQ(dstId, NebulaKeyUtils::getDstId(vIdLen_, edgeKey).subpiece(0, actualSize)); @@ -85,7 +85,7 @@ TEST_F(V1Test, SimpleTest) { VertexID vId = getStringId(1001L); TagID tagId = 2020; TagVersion tagVersion = folly::Random::rand64(); - verifyVertex(partId, vId, tagId, tagVersion); + verifyTag(partId, vId, tagId, tagVersion); VertexID srcId = getStringId(1001L), dstId = getStringId(2001L); EdgeType type = 1010; @@ -98,7 +98,7 @@ TEST_F(V1Test, NegativeEdgeTypeTest) { PartitionID partId = 123; VertexID vId = getStringId(1001L); TagID tagId = 2020; - verifyVertex(partId, vId, tagId, sizeof(int64_t)); + verifyTag(partId, vId, tagId, sizeof(int64_t)); VertexID 
srcId = getStringId(1001L), dstId = getStringId(2001L); EdgeType type = -1010; @@ -111,7 +111,7 @@ TEST_F(V2ShortTest, SimpleTest) { PartitionID partId = 123; VertexID vId = "0123456789"; TagID tagId = 2020; - verifyVertex(partId, vId, tagId, 10); + verifyTag(partId, vId, tagId, 10); VertexID srcId = "0123456789", dstId = "9876543210"; EdgeType type = 1010; @@ -124,7 +124,7 @@ TEST_F(V2ShortTest, NegativeEdgeTypeTest) { PartitionID partId = 123; VertexID vId = "0123456789"; TagID tagId = 2020; - verifyVertex(partId, vId, tagId, 10); + verifyTag(partId, vId, tagId, 10); VertexID srcId = "0123456789", dstId = "9876543210"; EdgeType type = -1010; @@ -137,7 +137,7 @@ TEST_F(V2LongTest, SimpleTest) { PartitionID partId = 123; VertexID vId = "0123456789"; TagID tagId = 2020; - verifyVertex(partId, vId, tagId, 10); + verifyTag(partId, vId, tagId, 10); VertexID srcId = "0123456789", dstId = "9876543210"; EdgeType type = 1010; @@ -150,7 +150,7 @@ TEST_F(V2LongTest, NegativeEdgeTypeTest) { PartitionID partId = 123; VertexID vId = "0123456789"; TagID tagId = 2020; - verifyVertex(partId, vId, tagId, 10); + verifyTag(partId, vId, tagId, 10); VertexID srcId = "0123456789", dstId = "9876543210"; EdgeType type = -1010; diff --git a/src/kvstore/Part.cpp b/src/kvstore/Part.cpp index feb83df0768..8d8526904c1 100644 --- a/src/kvstore/Part.cpp +++ b/src/kvstore/Part.cpp @@ -457,7 +457,7 @@ bool Part::preProcessLog(LogID logId, TermID termId, ClusterID clusterId, const void Part::cleanup() { LOG(INFO) << idStr_ << "Clean rocksdb part data"; // Remove the vertex, edge, index, systemCommitKey, operation data under the part - const auto& vertexPre = NebulaKeyUtils::vertexPrefix(partId_); + const auto& vertexPre = NebulaKeyUtils::tagPrefix(partId_); auto ret = engine_->removeRange(NebulaKeyUtils::firstKey(vertexPre, vIdLen_), NebulaKeyUtils::lastKey(vertexPre, vIdLen_)); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git a/src/kvstore/plugins/elasticsearch/ESListener.cpp 
b/src/kvstore/plugins/elasticsearch/ESListener.cpp index 0e59b5227bd..b8f8702018f 100644 --- a/src/kvstore/plugins/elasticsearch/ESListener.cpp +++ b/src/kvstore/plugins/elasticsearch/ESListener.cpp @@ -44,7 +44,7 @@ void ESListener::init() { bool ESListener::apply(const std::vector& data) { std::vector docItems; for (const auto& kv : data) { - if (!nebula::NebulaKeyUtils::isVertex(vIdLen_, kv.first) && + if (!nebula::NebulaKeyUtils::isTag(vIdLen_, kv.first) && !nebula::NebulaKeyUtils::isEdge(vIdLen_, kv.first)) { continue; } diff --git a/src/kvstore/plugins/hbase/HBaseStore.cpp b/src/kvstore/plugins/hbase/HBaseStore.cpp index 81d2577ccc2..3d2e55270db 100644 --- a/src/kvstore/plugins/hbase/HBaseStore.cpp +++ b/src/kvstore/plugins/hbase/HBaseStore.cpp @@ -45,7 +45,7 @@ std::shared_ptr HBaseStore::getSchema(GraphSpaceID SchemaVer version) { std::shared_ptr schema; folly::StringPiece rawKey = key; - if (NebulaKeyUtils::isVertex(key)) { + if (NebulaKeyUtils::isTag(key)) { TagID tagId = NebulaKeyUtils::getTagId(rawKey); if (version == -1) { version = schemaMan_->getLatestTagSchemaVersion(spaceId, tagId).value(); diff --git a/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp b/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp index 94fddde5c9e..d4903503875 100644 --- a/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp +++ b/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp @@ -93,7 +93,7 @@ TEST(HBaseStoreTest, SimpleTest) { } EXPECT_EQ(expectedTotal, num); }; - std::string prefix1 = NebulaKeyUtils::vertexPrefix(partId, srcId); + std::string prefix1 = NebulaKeyUtils::tagPrefix(partId, srcId); checkPrefix(prefix1, 0, 20); std::string prefix2 = NebulaKeyUtils::edgePrefix(partId, srcId, edgeType); checkPrefix(prefix2, 0, 10); diff --git a/src/kvstore/test/NebulaListenerTest.cpp b/src/kvstore/test/NebulaListenerTest.cpp index 5d996625dd8..c02e925beb6 100644 --- a/src/kvstore/test/NebulaListenerTest.cpp +++ b/src/kvstore/test/NebulaListenerTest.cpp @@ -510,7 +510,7 @@ 
TEST_P(ListenerAdvanceTest, ListenerResetBySnapshotTest) { for (int32_t i = 0; i < 10; i++) { std::vector data; for (int32_t j = 0; j < 1000; j++) { - auto vKey = NebulaKeyUtils::vertexKey(8, partId, folly::to(i * 1000 + j), 5); + auto vKey = NebulaKeyUtils::tagKey(8, partId, folly::to(i * 1000 + j), 5); data.emplace_back(std::move(vKey), folly::stringPrintf("val_%d_%d", partId, i * 1000 + j)); } auto leader = findLeader(partId); @@ -584,7 +584,7 @@ TEST_P(ListenerSnapshotTest, SnapshotRateLimitTest) { for (int32_t i = 0; i < 10; i++) { std::vector data; for (int32_t j = 0; j < 1000; j++) { - auto vKey = NebulaKeyUtils::vertexKey(8, partId, folly::to(i * 1000 + j), 5); + auto vKey = NebulaKeyUtils::tagKey(8, partId, folly::to(i * 1000 + j), 5); data.emplace_back(std::move(vKey), std::string(24, 'X')); } auto leader = findLeader(partId); diff --git a/src/kvstore/test/NebulaStoreTest.cpp b/src/kvstore/test/NebulaStoreTest.cpp index 5eae7d68b19..ec8a4e56a91 100644 --- a/src/kvstore/test/NebulaStoreTest.cpp +++ b/src/kvstore/test/NebulaStoreTest.cpp @@ -950,7 +950,7 @@ TEST(NebulaStoreTest, BackupRestoreTest) { if (insertData) { std::vector data; for (auto tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(vIdLen, partId, "vertex", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(vIdLen, partId, "vertex", tagId), folly::stringPrintf("val_%d", tagId)); } folly::Baton baton; @@ -962,7 +962,7 @@ TEST(NebulaStoreTest, BackupRestoreTest) { } { - std::string prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, "vertex"); + std::string prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, "vertex"); std::unique_ptr iter; auto code = store->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); diff --git a/src/kvstore/test/PartTest.cpp b/src/kvstore/test/PartTest.cpp index 53ea164b16d..900f7bc8357 100644 --- a/src/kvstore/test/PartTest.cpp +++ b/src/kvstore/test/PartTest.cpp @@ -23,7 +23,7 @@ void 
checkVertexData(RocksEngine* engine, PartitionID partId, int expectNum, bool checkVal = false) { - std::string vertexPrefix = NebulaKeyUtils::vertexPrefix(partId); + std::string vertexPrefix = NebulaKeyUtils::tagPrefix(partId); std::unique_ptr iter; auto code = engine->prefix(vertexPrefix, &iter); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -105,16 +105,16 @@ TEST(PartTest, KeyOrderTest) { // build vertex data in part 1, 2 while (partId < 3) { - auto key1 = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "", 0); + auto key1 = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, "", 0); data.emplace_back(key1, folly::stringPrintf("val%d", 1)); - auto key2 = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "", INT_MAX); + auto key2 = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, "", INT_MAX); data.emplace_back(key2, folly::stringPrintf("val%d", 2)); - auto key3 = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "ffffff", INT_MAX, '\377'); + auto key3 = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, "ffffff", INT_MAX, '\377'); data.emplace_back(key3, folly::stringPrintf("val%d", 3)); - auto key4 = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "", INT_MAX, '\377'); + auto key4 = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, "", INT_MAX, '\377'); data.emplace_back(key4, folly::stringPrintf("val%d", 4)); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->multiPut(data)); @@ -141,12 +141,12 @@ TEST(PartTest, PartCleanTest) { while (partId < 3) { TagID tagId = 1; for (int i = 0; i < 10; i++) { - auto key = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, std::to_string(i), tagId); + auto key = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, std::to_string(i), tagId); data.emplace_back(key, folly::stringPrintf("val%d", i)); } tagId = 2; for (int i = 0; i < 10; i++) { - auto key = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, std::to_string(i), tagId); + auto key = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, std::to_string(i), tagId); 
data.emplace_back(key, folly::stringPrintf("val%d", i)); } @@ -196,7 +196,7 @@ TEST(PartTest, PartCleanTest) { // remove range part::clean data partId = 1; - const auto& vertexPre = NebulaKeyUtils::vertexPrefix(partId); + const auto& vertexPre = NebulaKeyUtils::tagPrefix(partId); auto ret = engine->removeRange(NebulaKeyUtils::firstKey(vertexPre, kDefaultVIdLen), NebulaKeyUtils::lastKey(vertexPre, kDefaultVIdLen)); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); diff --git a/src/kvstore/test/RocksEngineTest.cpp b/src/kvstore/test/RocksEngineTest.cpp index 9d325102bf4..a09264429d1 100644 --- a/src/kvstore/test/RocksEngineTest.cpp +++ b/src/kvstore/test/RocksEngineTest.cpp @@ -304,13 +304,13 @@ TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { auto writeVertex = [&](TagID tagId) { std::vector data; - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, vId, tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, vId, tagId), folly::stringPrintf("val_%d", tagId)); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, engine->multiPut(std::move(data))); }; auto readVertex = [&](TagID tagId) { - auto key = NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, vId, tagId); + auto key = NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, vId, tagId); std::string val; auto ret = engine->get(key, &val); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -321,7 +321,7 @@ TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { }; auto scanVertex = [&](VertexID id) { - auto prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, id); + auto prefix = NebulaKeyUtils::tagPrefix(kDefaultVIdLen, partId, id); std::unique_ptr iter; auto ret = engine->prefix(prefix, &iter); EXPECT_EQ(ret, nebula::cpp2::ErrorCode::SUCCEEDED); @@ -451,13 +451,13 @@ TEST_P(RocksEngineTest, PrefixBloomTest) { std::vector data; for (auto tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "1", tagId), + 
data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 1, "1", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "2", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 1, "2", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "3", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 2, "3", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "4", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 2, "4", tagId), folly::stringPrintf("val_%d", tagId)); } data.emplace_back(NebulaKeyUtils::systemCommitKey(1), "123"); @@ -467,7 +467,7 @@ TEST_P(RocksEngineTest, PrefixBloomTest) { { // vertexPrefix(partId) will not be included auto checkVertexPrefix = [&](PartitionID partId, const VertexID& vId) { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId); + std::string prefix = NebulaKeyUtils::tagPrefix(kDefaultVIdLen, partId, vId); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -486,7 +486,7 @@ TEST_P(RocksEngineTest, PrefixBloomTest) { { // vertexPrefix(partId) will be included auto checkPartPrefix = [&](PartitionID partId) { - std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::string prefix = NebulaKeyUtils::tagPrefix(partId); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -503,7 +503,7 @@ TEST_P(RocksEngineTest, PrefixBloomTest) { { // vertexPrefix(partId) will be included auto checkRangeWithPartPrefix = [&](PartitionID partId) { - std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::string prefix = NebulaKeyUtils::tagPrefix(partId); std::unique_ptr iter; auto code = engine->rangeWithPrefix(prefix, prefix, &iter); 
EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -578,7 +578,7 @@ TEST(PlainTableTest, BackupRestoreWithData) { PartitionID partId = 1; auto checkData = [&] { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, "vertex"); + std::string prefix = NebulaKeyUtils::tagPrefix(kDefaultVIdLen, partId, "vertex"); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -598,7 +598,7 @@ TEST(PlainTableTest, BackupRestoreWithData) { LOG(INFO) << "Write some data"; std::vector data; for (auto tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, partId, "vertex", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, partId, "vertex", tagId), folly::stringPrintf("val_%d", tagId)); } data.emplace_back(NebulaKeyUtils::systemCommitKey(partId), "123"); @@ -635,7 +635,7 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { auto checkData = [&] { auto checkVertexPrefix = [&](PartitionID partId, VertexID vId) { { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId); + std::string prefix = NebulaKeyUtils::tagPrefix(kDefaultVIdLen, partId, vId); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -647,7 +647,7 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { EXPECT_EQ(num, 10); } for (TagID tagId = 0; tagId < 10; tagId++) { - std::string prefix = NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, partId, vId, tagId); + std::string prefix = NebulaKeyUtils::tagPrefix(kDefaultVIdLen, partId, vId, tagId); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -688,7 +688,7 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { }; auto checkVertexPartPrefix = [&](PartitionID partId) { - std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::string 
prefix = NebulaKeyUtils::tagPrefix(partId); std::unique_ptr iter; auto code = engine->prefix(prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -714,7 +714,7 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { }; auto checkRangeWithPartPrefix = [&](PartitionID partId) { - std::string prefix = NebulaKeyUtils::vertexPrefix(partId); + std::string prefix = NebulaKeyUtils::tagPrefix(partId); std::unique_ptr iter; auto code = engine->rangeWithPrefix(prefix, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); @@ -762,13 +762,13 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { LOG(INFO) << "Write some data"; std::vector data; for (TagID tagId = 0; tagId < 10; tagId++) { - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "1", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 1, "1", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 1, "2", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 1, "2", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "3", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 2, "3", tagId), folly::stringPrintf("val_%d", tagId)); - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 2, "4", tagId), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 2, "4", tagId), folly::stringPrintf("val_%d", tagId)); } EdgeRanking rank = 0; @@ -790,7 +790,7 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { auto writeNewData = [&engine] { std::vector data; - data.emplace_back(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 3, "5", 0), + data.emplace_back(NebulaKeyUtils::tagKey(kDefaultVIdLen, 3, "5", 0), "vertex_data_after_enable_prefix_bloom_filter"); data.emplace_back(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 3, "5", 0, 0, "5"), "edge_data_after_enable_prefix_bloom_filter"); @@ -800,7 +800,7 @@ 
TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { auto checkNewData = [&engine] { std::string value; - auto code = engine->get(NebulaKeyUtils::vertexKey(kDefaultVIdLen, 3, "5", 0), &value); + auto code = engine->get(NebulaKeyUtils::tagKey(kDefaultVIdLen, 3, "5", 0), &value); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, code); EXPECT_EQ("vertex_data_after_enable_prefix_bloom_filter", value); code = engine->get(NebulaKeyUtils::edgeKey(kDefaultVIdLen, 3, "5", 0, 0, "5"), &value); @@ -818,9 +818,9 @@ TEST(RebuildPrefixBloomFilter, RebuildPrefixBloomFilter) { EXPECT_EQ(num, 1); }; - checkPrefix(NebulaKeyUtils::vertexPrefix(3)); - checkPrefix(NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, 3, "5")); - checkPrefix(NebulaKeyUtils::vertexPrefix(kDefaultVIdLen, 3, "5", 0)); + checkPrefix(NebulaKeyUtils::tagPrefix(3)); + checkPrefix(NebulaKeyUtils::tagPrefix(kDefaultVIdLen, 3, "5")); + checkPrefix(NebulaKeyUtils::tagPrefix(kDefaultVIdLen, 3, "5", 0)); checkPrefix(NebulaKeyUtils::edgePrefix(3)); checkPrefix(NebulaKeyUtils::edgePrefix(kDefaultVIdLen, 3, "5")); checkPrefix(NebulaKeyUtils::edgePrefix(kDefaultVIdLen, 3, "5", 0)); diff --git a/src/storage/CompactionFilter.h b/src/storage/CompactionFilter.h index f035ac3fa6a..6f95e3675fd 100644 --- a/src/storage/CompactionFilter.h +++ b/src/storage/CompactionFilter.h @@ -37,7 +37,7 @@ class StorageCompactionFilter final : public kvstore::KVFilter { return false; } - if (NebulaKeyUtils::isVertex(vIdLen_, key)) { + if (NebulaKeyUtils::isTag(vIdLen_, key)) { return !vertexValid(spaceId, key, val); } else if (NebulaKeyUtils::isEdge(vIdLen_, key)) { return !edgeValid(spaceId, key, val); diff --git a/src/storage/admin/RebuildTagIndexTask.cpp b/src/storage/admin/RebuildTagIndexTask.cpp index bc16b39eb81..4368d707053 100644 --- a/src/storage/admin/RebuildTagIndexTask.cpp +++ b/src/storage/admin/RebuildTagIndexTask.cpp @@ -52,7 +52,7 @@ nebula::cpp2::ErrorCode RebuildTagIndexTask::buildIndexGlobal(GraphSpaceID space auto vidSize = 
vidSizeRet.value(); std::unique_ptr iter; - auto prefix = NebulaKeyUtils::vertexPrefix(part); + auto prefix = NebulaKeyUtils::tagPrefix(part); auto ret = env_->kvstore_->prefix(space, part, prefix, &iter); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Processing Part " << part << " Failed"; diff --git a/src/storage/admin/StatsTask.cpp b/src/storage/admin/StatsTask.cpp index 91774fc8b5b..cf4d56811a6 100644 --- a/src/storage/admin/StatsTask.cpp +++ b/src/storage/admin/StatsTask.cpp @@ -96,7 +96,7 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, auto partitionNum = partitionNumRet.value(); LOG(INFO) << "Start statis task"; CHECK_NOTNULL(env_->kvstore_); - auto vertexPrefix = NebulaKeyUtils::vertexPrefix(part); + auto vertexPrefix = NebulaKeyUtils::tagPrefix(part); std::unique_ptr vertexIter; auto edgePrefix = NebulaKeyUtils::edgePrefix(part); std::unique_ptr edgeIter; diff --git a/src/storage/exec/IndexVertexScanNode.cpp b/src/storage/exec/IndexVertexScanNode.cpp index 499a4d59d8a..aa87a063301 100644 --- a/src/storage/exec/IndexVertexScanNode.cpp +++ b/src/storage/exec/IndexVertexScanNode.cpp @@ -53,10 +53,10 @@ ::nebula::cpp2::ErrorCode IndexVertexScanNode::init(InitContext& ctx) { nebula::cpp2::ErrorCode IndexVertexScanNode::getBaseData(folly::StringPiece key, std::pair& kv) { - kv.first = NebulaKeyUtils::vertexKey(context_->vIdLen(), - partId_, - key.subpiece(key.size() - context_->vIdLen()).toString(), - context_->tagId_); + kv.first = NebulaKeyUtils::tagKey(context_->vIdLen(), + partId_, + key.subpiece(key.size() - context_->vIdLen()).toString(), + context_->tagId_); return kvstore_->get(context_->spaceId(), partId_, kv.first, &kv.second); } diff --git a/src/storage/exec/ScanNode.h b/src/storage/exec/ScanNode.h index 3778eb87804..5af7912f758 100644 --- a/src/storage/exec/ScanNode.h +++ b/src/storage/exec/ScanNode.h @@ -43,7 +43,7 @@ class ScanVertexPropNode : public QueryNode { } std::string start; - std::string prefix = 
NebulaKeyUtils::vertexPrefix(partId); + std::string prefix = NebulaKeyUtils::tagPrefix(partId); if (cursor.empty()) { start = prefix; } else { diff --git a/src/storage/exec/TagNode.h b/src/storage/exec/TagNode.h index d6d597addc8..e2086738518 100644 --- a/src/storage/exec/TagNode.h +++ b/src/storage/exec/TagNode.h @@ -50,7 +50,7 @@ class TagNode final : public IterateNode { VLOG(1) << "partId " << partId << ", vId " << vId << ", tagId " << tagId_ << ", prop size " << props_->size(); - key_ = NebulaKeyUtils::vertexKey(context_->vIdLen(), partId, vId, tagId_); + key_ = NebulaKeyUtils::tagKey(context_->vIdLen(), partId, vId, tagId_); ret = context_->env()->kvstore_->get(context_->spaceId(), partId, key_, &value_); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { return doExecute(key_, value_); diff --git a/src/storage/exec/UpdateNode.h b/src/storage/exec/UpdateNode.h index e9ef3c9d072..21fa7153ee0 100644 --- a/src/storage/exec/UpdateNode.h +++ b/src/storage/exec/UpdateNode.h @@ -269,7 +269,7 @@ class UpdateTagNode : public UpdateNode { expCtx_->setTagProp(tagName_, p.first, p.second); } - key_ = NebulaKeyUtils::vertexKey(context_->vIdLen(), partId, vId, tagId_); + key_ = NebulaKeyUtils::tagKey(context_->vIdLen(), partId, vId, tagId_); rowWriter_ = std::make_unique(schema_); return nebula::cpp2::ErrorCode::SUCCEEDED; diff --git a/src/storage/mutate/AddVerticesProcessor.cpp b/src/storage/mutate/AddVerticesProcessor.cpp index cdd897e8373..2396014d841 100644 --- a/src/storage/mutate/AddVerticesProcessor.cpp +++ b/src/storage/mutate/AddVerticesProcessor.cpp @@ -90,7 +90,7 @@ void AddVerticesProcessor::doProcess(const cpp2::AddVerticesRequest& req) { break; } - auto key = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vid, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vid, tagId); if (ifNotExists_) { if (!visited.emplace(key).second) { continue; @@ -142,7 +142,7 @@ void AddVerticesProcessor::doProcessWithIndex(const cpp2::AddVerticesRequest& re 
dummyLock.reserve(vertices.size()); auto code = nebula::cpp2::ErrorCode::SUCCEEDED; - // cache vertexKey + // cache tagKey std::unordered_set visited; visited.reserve(vertices.size()); for (auto& vertex : vertices) { @@ -176,7 +176,7 @@ void AddVerticesProcessor::doProcessWithIndex(const cpp2::AddVerticesRequest& re break; } - auto key = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vid, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vid, tagId); if (ifNotExists_ && !visited.emplace(key).second) { continue; } @@ -307,7 +307,7 @@ void AddVerticesProcessor::doProcessWithIndex(const cpp2::AddVerticesRequest& re ErrorOr AddVerticesProcessor::findOldValue( PartitionID partId, const VertexID& vId, TagID tagId) { - auto key = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vId, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vId, tagId); std::string val; auto ret = env_->kvstore_->get(spaceId_, partId, key, &val); if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git a/src/storage/mutate/DeleteTagsProcessor.cpp b/src/storage/mutate/DeleteTagsProcessor.cpp index 5aae7132f7d..af414c55ba4 100644 --- a/src/storage/mutate/DeleteTagsProcessor.cpp +++ b/src/storage/mutate/DeleteTagsProcessor.cpp @@ -55,7 +55,7 @@ void DeleteTagsProcessor::process(const cpp2::DeleteTagsRequest& req) { for (const auto& entry : delTags) { const auto& vId = entry.get_id().getStr(); for (const auto& tagId : entry.get_tags()) { - auto key = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vId, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vId, tagId); keys.emplace_back(std::move(key)); } } @@ -94,7 +94,7 @@ ErrorOr DeleteTagsProcessor::deleteTags( for (const auto& entry : delTags) { const auto& vId = entry.get_id().getStr(); for (const auto& tagId : entry.get_tags()) { - auto key = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vId, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vId, tagId); auto tup = 
std::make_tuple(spaceId_, partId, tagId, vId); // ignore if there are duplicate delete if (std::find(lockedKeys.begin(), lockedKeys.end(), tup) != lockedKeys.end()) { diff --git a/src/storage/mutate/DeleteVerticesProcessor.cpp b/src/storage/mutate/DeleteVerticesProcessor.cpp index eb37ff8ce99..e9a6bae1840 100644 --- a/src/storage/mutate/DeleteVerticesProcessor.cpp +++ b/src/storage/mutate/DeleteVerticesProcessor.cpp @@ -62,7 +62,7 @@ void DeleteVerticesProcessor::process(const cpp2::DeleteVerticesRequest& req) { break; } - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid.getStr()); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid.getStr()); std::unique_ptr iter; code = env_->kvstore_->prefix(spaceId_, partId, prefix, &iter); if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -112,7 +112,7 @@ ErrorOr DeleteVerticesProcessor::deleteVer target.reserve(vertices.size()); std::unique_ptr batchHolder = std::make_unique(); for (auto& vertex : vertices) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vertex.getStr()); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vertex.getStr()); std::unique_ptr iter; auto ret = env_->kvstore_->prefix(spaceId_, partId, prefix, &iter); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git a/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp b/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp index d5cd087c480..9a8bbc1f3b8 100644 --- a/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp +++ b/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp @@ -131,7 +131,7 @@ bool mockVertexData(storage::StorageEnv* ev, int32_t totalParts, int32_t vidLen, std::atomic count(1); std::vector data; - auto key = NebulaKeyUtils::vertexKey(vidLen, pId, vertex.vId_, vertex.tId_); + auto key = NebulaKeyUtils::tagKey(vidLen, pId, vertex.vId_, vertex.tId_); auto schema = ev->schemaMan_->getTagSchema(spaceId, vertex.tId_); if (!schema) { LOG(ERROR) << "Invalid 
tagId " << vertex.tId_; diff --git a/src/storage/test/CompactionTest.cpp b/src/storage/test/CompactionTest.cpp index c1345915513..3e6aef4ac0b 100644 --- a/src/storage/test/CompactionTest.cpp +++ b/src/storage/test/CompactionTest.cpp @@ -35,7 +35,7 @@ void checkTagVertexData(int32_t spaceVidLen, VertexID lastVertexId = ""; for (int part = 1; part <= parts; part++) { - auto prefix = NebulaKeyUtils::vertexPrefix(part); + auto prefix = NebulaKeyUtils::tagPrefix(part); auto ret = env->kvstore_->prefix(spaceId, part, prefix, &iter); ASSERT_EQ(ret, nebula::cpp2::ErrorCode::SUCCEEDED); diff --git a/src/storage/test/GetNeighborsBenchmark.cpp b/src/storage/test/GetNeighborsBenchmark.cpp index ac2157984cc..d18c97224b9 100644 --- a/src/storage/test/GetNeighborsBenchmark.cpp +++ b/src/storage/test/GetNeighborsBenchmark.cpp @@ -250,7 +250,7 @@ void prefix(int32_t iters, { // read tags std::unique_ptr iter; - auto prefix = nebula::NebulaKeyUtils::vertexPrefix(vIdLen, partId, vId, player); + auto prefix = nebula::NebulaKeyUtils::tagKey(vIdLen, partId, vId, player); auto code = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); CHECK(iter->valid()); diff --git a/src/storage/test/GetPropTest.cpp b/src/storage/test/GetPropTest.cpp index 59d3bb6e442..1ed42d03a1f 100644 --- a/src/storage/test/GetPropTest.cpp +++ b/src/storage/test/GetPropTest.cpp @@ -668,7 +668,7 @@ TEST(QueryVertexPropsTest, PrefixBloomFilterTest) { for (const auto& vId : vertices) { PartitionID partId = (hash(vId) % totalParts) + 1; std::unique_ptr iter; - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, vId, player); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, vId, player); auto code = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); } diff --git a/src/storage/test/IndexScanLimitTest.cpp b/src/storage/test/IndexScanLimitTest.cpp index bb57aef7ff0..83e678c0d4c 100644 --- 
a/src/storage/test/IndexScanLimitTest.cpp +++ b/src/storage/test/IndexScanLimitTest.cpp @@ -118,9 +118,9 @@ class IndexScanLimitTest : public ::testing::Test { std::string val = vid % 2 == 0 ? val1 : val2; auto vertex = folly::to(vid); auto edgeKey = NebulaKeyUtils::edgeKey(vertexLen, pId, vertex, edgeType, 0, vertex); - auto vertexKey = NebulaKeyUtils::vertexKey(vertexLen, pId, vertex, tagId); + auto tagKey = NebulaKeyUtils::tagKey(vertexLen, pId, vertex, tagId); data.emplace_back(std::move(edgeKey), val); - data.emplace_back(std::move(vertexKey), std::move(val)); + data.emplace_back(std::move(tagKey), std::move(val)); if (indexMan_ != nullptr) { if (indexMan_->getTagIndex(spaceId, tagIndex).ok()) { auto vertexIndexKeys = diff --git a/src/storage/test/IndexTest.cpp b/src/storage/test/IndexTest.cpp index 6c4e34ad122..2b269da938f 100644 --- a/src/storage/test/IndexTest.cpp +++ b/src/storage/test/IndexTest.cpp @@ -152,7 +152,7 @@ class IndexScanTest : public ::testing::Test { std::vector> indices) { std::vector> ret(indices.size() + 1); for (size_t i = 0; i < rows.size(); i++) { - auto key = NebulaKeyUtils::vertexKey(8, 0, std::to_string(i), tagId); + auto key = NebulaKeyUtils::tagKey(8, 0, std::to_string(i), tagId); RowWriterV2 writer(schema.get()); for (size_t j = 0; j < rows[i].size(); j++) { writer.setValue(j, rows[i][j]); diff --git a/src/storage/test/IndexWithTTLTest.cpp b/src/storage/test/IndexWithTTLTest.cpp index 0d61c3248de..88c16d49737 100644 --- a/src/storage/test/IndexWithTTLTest.cpp +++ b/src/storage/test/IndexWithTTLTest.cpp @@ -164,7 +164,7 @@ TEST(IndexWithTTLTest, AddVerticesIndexWithTTL) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -184,7 +184,7 @@ TEST(IndexWithTTLTest, AddVerticesIndexWithTTL) { LOG(INFO) << 
"Check data after compaction ..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(0, retNum); } @@ -258,7 +258,7 @@ TEST(IndexWithTTLTest, UpdateVerticesIndexWithTTL) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -302,7 +302,7 @@ TEST(IndexWithTTLTest, UpdateVerticesIndexWithTTL) { LOG(INFO) << "Check data after update ..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -405,7 +405,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTL) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -448,7 +448,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTL) { LOG(INFO) << "Check data after rebuild ..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -543,7 +543,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTLExpired) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, 
prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -588,7 +588,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTLExpired) { LOG(INFO) << "Check data after rebuild ..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } diff --git a/src/storage/test/IndexWriteTest.cpp b/src/storage/test/IndexWriteTest.cpp index 1aec5beb400..91f30a0f53d 100644 --- a/src/storage/test/IndexWriteTest.cpp +++ b/src/storage/test/IndexWriteTest.cpp @@ -91,7 +91,7 @@ TEST(IndexTest, SimpleVerticesTest) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -120,7 +120,7 @@ TEST(IndexTest, SimpleVerticesTest) { LOG(INFO) << "Check delete data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(0, retNum); } @@ -286,7 +286,7 @@ TEST(IndexTest, VerticesValueTest) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); auto retNum = verifyResultNum(1, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -398,7 +398,7 @@ TEST(IndexTest, AlterTagIndexTest) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; 
partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); auto retNum = verifyResultNum(spaceId, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } @@ -460,7 +460,7 @@ TEST(IndexTest, AlterTagIndexTest) { LOG(INFO) << "Check insert data..."; for (auto partId = 1; partId <= 6; partId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, convertVertexId(vIdLen, partId)); auto retNum = verifyResultNum(spaceId, partId, prefix, env->kvstore_); EXPECT_EQ(1, retNum); } diff --git a/src/storage/test/LookupIndexTest.cpp b/src/storage/test/LookupIndexTest.cpp index 2c883faddff..4f96001b67f 100644 --- a/src/storage/test/LookupIndexTest.cpp +++ b/src/storage/test/LookupIndexTest.cpp @@ -64,7 +64,7 @@ TEST_P(LookupIndexTest, LookupIndexTestV1) { RowWriterV1 writer(schemaV1.get()); writer << true << 1L << 1.1F << 1.1F << "row1"; writer.encode(); - auto key = NebulaKeyUtils::vertexKey(vIdLen.value(), 1, vId1, 3); + auto key = NebulaKeyUtils::tagKey(vIdLen.value(), 1, vId1, 3); keyValues.emplace_back(std::move(key), writer.encode()); // setup V2 row @@ -85,7 +85,7 @@ TEST_P(LookupIndexTest, LookupIndexTestV1) { writer2.setValue("col_date", date); writer2.setValue("col_datetime", dt); writer2.finish(); - key = NebulaKeyUtils::vertexKey(vIdLen.value(), 1, vId2, 3); + key = NebulaKeyUtils::tagKey(vIdLen.value(), 1, vId2, 3); keyValues.emplace_back(std::move(key), writer2.moveEncodedStr()); // setup index key diff --git a/src/storage/test/MemoryLockBenchmark.cpp b/src/storage/test/MemoryLockBenchmark.cpp index ac1aaf6f2b6..cc33755577d 100644 --- a/src/storage/test/MemoryLockBenchmark.cpp +++ b/src/storage/test/MemoryLockBenchmark.cpp @@ -36,7 +36,7 @@ void forString(StringLock* lock, int64_t spaceId) noexcept { size_t vIdLen = 32; for 
(int32_t j = 0; j < FLAGS_num_batch; j++) { toLock.emplace_back(folly::to(spaceId) + - NebulaKeyUtils::vertexKey(vIdLen, j, folly::to(j), j)); + NebulaKeyUtils::tagKey(vIdLen, j, folly::to(j), j)); } nebula::MemoryLockGuard lg(lock, std::move(toLock)); } diff --git a/src/storage/test/PrefixBloomFilterBenchmark.cpp b/src/storage/test/PrefixBloomFilterBenchmark.cpp index 7140fbde060..f4f6750cccf 100644 --- a/src/storage/test/PrefixBloomFilterBenchmark.cpp +++ b/src/storage/test/PrefixBloomFilterBenchmark.cpp @@ -28,7 +28,7 @@ void mockData(StorageEnv* env, int32_t partCount) { vertexId < (partId + 1) * FLAGS_vertex_per_part; vertexId++) { for (TagID tagId = 3001; tagId < 3010; tagId++) { - auto key = NebulaKeyUtils::vertexKey(vIdLen, partId, std::to_string(vertexId), tagId); + auto key = NebulaKeyUtils::tagKey(vIdLen, partId, std::to_string(vertexId), tagId); auto val = folly::stringPrintf("%d_%d", vertexId, tagId); data.emplace_back(std::move(key), std::move(val)); } @@ -53,7 +53,7 @@ void testPrefixSeek(StorageEnv* env, int32_t partCount, int32_t iters) { for (int32_t vertexId = partId * FLAGS_vertex_per_part; vertexId < (partId + 1) * FLAGS_vertex_per_part; vertexId++) { - auto prefix = NebulaKeyUtils::vertexPrefix(vIdLen, partId, std::to_string(vertexId)); + auto prefix = NebulaKeyUtils::tagPrefix(vIdLen, partId, std::to_string(vertexId)); std::unique_ptr iter; auto code = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(code, nebula::cpp2::ErrorCode::SUCCEEDED); diff --git a/src/storage/test/QueryStatsTest.cpp b/src/storage/test/QueryStatsTest.cpp index 04d26977fbd..b8cd99ba942 100644 --- a/src/storage/test/QueryStatsTest.cpp +++ b/src/storage/test/QueryStatsTest.cpp @@ -21,7 +21,7 @@ void mockData(kvstore::KVStore* kv) { std::vector data; for (int32_t vertexId = partId * 10; vertexId < (partId + 1) * 10; vertexId++) { for (int32_t tagId = 3001; tagId < 3010; tagId++) { - auto key = NebulaKeyUtils::vertexKey(partId, vertexId, tagId, 0); + auto 
key = NebulaKeyUtils::tagKey(partId, vertexId, tagId, 0); auto val = TestUtils::setupEncode(3, 6); data.emplace_back(std::move(key), std::move(val)); } diff --git a/src/storage/test/QueryTestUtils.h b/src/storage/test/QueryTestUtils.h index 24556c2e1fd..5b9d5d103c3 100644 --- a/src/storage/test/QueryTestUtils.h +++ b/src/storage/test/QueryTestUtils.h @@ -45,7 +45,7 @@ class QueryTestUtils { for (const auto& vertex : vertices) { PartitionID partId = (hash(vertex.vId_) % totalParts) + 1; TagID tagId = vertex.tId_; - auto key = NebulaKeyUtils::vertexKey(spaceVidLen, partId, vertex.vId_, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen, partId, vertex.vId_, tagId); auto schema = env->schemaMan_->getTagSchema(spaceId, tagId); if (!schema) { LOG(ERROR) << "Invalid tagId " << tagId; diff --git a/src/storage/test/RebuildIndexTest.cpp b/src/storage/test/RebuildIndexTest.cpp index 8661811ea92..ec598d52849 100644 --- a/src/storage/test/RebuildIndexTest.cpp +++ b/src/storage/test/RebuildIndexTest.cpp @@ -101,7 +101,7 @@ TEST_F(RebuildIndexTest, RebuildTagIndexCheckALLData) { EXPECT_LT(0, vidSize); int dataNum = 0; for (auto part : parts) { - auto prefix = NebulaKeyUtils::vertexPrefix(part); + auto prefix = NebulaKeyUtils::tagPrefix(part); std::unique_ptr iter; auto ret = RebuildIndexTest::env_->kvstore_->prefix(1, part, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); diff --git a/src/storage/test/StatsTaskTest.cpp b/src/storage/test/StatsTaskTest.cpp index 6ba49a74f9f..f267df6eb82 100644 --- a/src/storage/test/StatsTaskTest.cpp +++ b/src/storage/test/StatsTaskTest.cpp @@ -299,7 +299,7 @@ TEST_F(StatsTaskTest, StatsTagAndEdgeData) { VertexID lastDstVertexId = ""; EdgeRanking lastRank = 0; - auto prefix = NebulaKeyUtils::vertexPrefix(part); + auto prefix = NebulaKeyUtils::tagPrefix(part); std::unique_ptr iter; auto ret = env_->kvstore_->prefix(spaceId, part, prefix, &iter); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git 
a/src/storage/test/StorageIndexWriteBenchmark.cpp b/src/storage/test/StorageIndexWriteBenchmark.cpp index 0571951f32a..dde6bdadd00 100644 --- a/src/storage/test/StorageIndexWriteBenchmark.cpp +++ b/src/storage/test/StorageIndexWriteBenchmark.cpp @@ -176,7 +176,7 @@ void initEnv(IndexENV type, } void verifyDataCount(storage::StorageEnv* env, int32_t expected) { - auto prefix = NebulaKeyUtils::vertexPrefix(1); + auto prefix = NebulaKeyUtils::tagPrefix(1); std::unique_ptr iter; auto status = env->kvstore_->prefix(spaceId, 1, prefix, &iter); DCHECK(nebula::cpp2::ErrorCode::SUCCEEDED == status); diff --git a/src/storage/test/TestUtils.h b/src/storage/test/TestUtils.h index 49a004c8454..d82bbd8ca9f 100644 --- a/src/storage/test/TestUtils.h +++ b/src/storage/test/TestUtils.h @@ -41,7 +41,7 @@ void checkAddVerticesData(cpp2::AddVerticesRequest req, for (auto& newTag : newTagVec) { auto tagId = newTag.get_tag_id(); - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vid.getStr(), tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vid.getStr(), tagId); std::unique_ptr iter; EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, env->kvstore_->prefix(spaceId, partId, prefix, &iter)); @@ -152,7 +152,7 @@ void checkVerticesData(int32_t spaceVidLen, auto partId = part.first; auto deleteVidVec = part.second; for (auto& vid : deleteVidVec) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vid.getStr()); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vid.getStr()); std::unique_ptr iter; EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, env->kvstore_->prefix(spaceId, partId, prefix, &iter)); diff --git a/src/storage/test/UpdateVertexTest.cpp b/src/storage/test/UpdateVertexTest.cpp index 32b3d1a9eb3..1b6558ca593 100644 --- a/src/storage/test/UpdateVertexTest.cpp +++ b/src/storage/test/UpdateVertexTest.cpp @@ -58,7 +58,7 @@ static bool mockVertexData(storage::StorageEnv* env, int32_t totalParts, int32_t data.clear(); for (const 
auto& vertex : part.second) { TagID tagId = vertex.tId_; - auto key = NebulaKeyUtils::vertexKey(spaceVidLen, part.first, vertex.vId_, tagId); + auto key = NebulaKeyUtils::tagKey(spaceVidLen, part.first, vertex.vId_, tagId); auto schema = env->schemaMan_->getTagSchema(spaceId, tagId); if (!schema) { LOG(ERROR) << "Invalid tagId " << tagId; @@ -184,7 +184,7 @@ TEST(UpdateVertexTest, No_Filter_Test) { EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[5].getInt()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -303,7 +303,7 @@ TEST(UpdateVertexTest, Filter_Yield_Test2) { // get player from kvstore directly // Because no update, the value is old - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -406,7 +406,7 @@ TEST(UpdateVertexTest, Insertable_Test) { EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[5].getInt()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -487,7 +487,7 @@ TEST(UpdateVertexTest, Invalid_Update_Prop_Test) { // get player from kvstore directly // Because no update, the value is old - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, 
partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -587,7 +587,7 @@ TEST(UpdateVertexTest, Invalid_Filter_Test) { // get player from kvstore directly // Because no update, the value is old - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -704,7 +704,7 @@ TEST(UpdateVertexTest, Insertable_Filter_Value_Test) { EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[5].getInt()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -737,7 +737,7 @@ TEST(UpdateVertexTest, CorruptDataTest) { auto partId = std::hash()("Lonzo Ball") % parts + 1; VertexID vertexId("Lonzo Ball"); - auto key = NebulaKeyUtils::vertexKey(spaceVidLen, partId, vertexId, 1); + auto key = NebulaKeyUtils::tagKey(spaceVidLen, partId, vertexId, 1); std::vector data; data.emplace_back(std::make_pair(key, "")); folly::Baton<> baton; @@ -954,7 +954,7 @@ TEST(UpdateVertexTest, TTL_Insert_No_Exist_Test) { EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[5].getInt()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -1075,7 +1075,7 @@ TEST(UpdateVertexTest, 
TTL_Insert_Test) { // Get player from kvstore directly, ttl expired data can be readed // First record is inserted record data // Second record is expired ttl data - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -1256,7 +1256,7 @@ TEST(UpdateVertexTest, Insertable_In_Set_Test) { EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[4].getInt()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); EXPECT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -1328,7 +1328,7 @@ TEST(UpdateVertexTest, Update_Multi_tag_Test) { EXPECT_EQ(1, (*resp.result_ref()).failed_parts.size()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -1398,7 +1398,7 @@ TEST(UpdateVertexTest, Upsert_Multi_tag_Test) { EXPECT_EQ(1, (*resp.result_ref()).failed_parts.size()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -1467,7 +1467,7 @@ TEST(UpdateVertexTest, Upsert_Field_Type_And_Value_Match_Test) { EXPECT_EQ(1, 
(*resp.result_ref()).failed_parts.size()); // get player from kvstore directly - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen, partId, vertexId, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); std::unique_ptr iter; auto ret = env->kvstore_->prefix(spaceId, partId, prefix, &iter); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, ret); diff --git a/src/tools/db-dump/DbDumper.cpp b/src/tools/db-dump/DbDumper.cpp index 3ae9d40fd0d..8be44a822e9 100644 --- a/src/tools/db-dump/DbDumper.cpp +++ b/src/tools/db-dump/DbDumper.cpp @@ -219,7 +219,7 @@ void DbDumper::run() { continue; } auto partId = metaClient_->partId(partNum_, vid); - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid); seek(prefix); } break; @@ -246,7 +246,7 @@ void DbDumper::run() { } auto partId = metaClient_->partId(partNum_, vid); for (auto tagId : tagIds_) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid, tagId); seek(prefix); } } @@ -271,7 +271,7 @@ void DbDumper::run() { } auto partId = metaClient_->partId(partNum_, vid); for (auto tagId : tagIds_) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid, tagId); seek(prefix); } } @@ -280,7 +280,7 @@ void DbDumper::run() { case 0b1000: { // specified part, seek with prefix and print them all for (auto partId : parts_) { - auto vertexPrefix = NebulaKeyUtils::vertexPrefix(partId); + auto vertexPrefix = NebulaKeyUtils::tagPrefix(partId); seek(vertexPrefix); auto edgePrefix = NebulaKeyUtils::edgePrefix(partId); seek(edgePrefix); @@ -302,7 +302,7 @@ void DbDumper::run() { beforePrintVertex_.emplace_back(printIfTagFound); beforePrintEdge_.emplace_back(noPrint); for (auto partId : parts_) { - auto prefix = 
NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); seek(prefix); } break; @@ -321,7 +321,7 @@ void DbDumper::run() { beforePrintVertex_.emplace_back(printIfTagFound); beforePrintEdge_.emplace_back(noPrint); for (auto partId : parts_) { - auto prefix = NebulaKeyUtils::vertexPrefix(partId); + auto prefix = NebulaKeyUtils::tagPrefix(partId); seek(prefix); } break; @@ -333,7 +333,7 @@ void DbDumper::run() { if (!isValidVidLen(vid)) { continue; } - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid); seek(prefix); } } @@ -362,7 +362,7 @@ void DbDumper::run() { continue; } for (auto tagId : tagIds_) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid, tagId); seek(prefix); } } @@ -389,7 +389,7 @@ void DbDumper::run() { continue; } for (auto tagId : tagIds_) { - auto prefix = NebulaKeyUtils::vertexPrefix(spaceVidLen_, partId, vid, tagId); + auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen_, partId, vid, tagId); seek(prefix); } } @@ -440,7 +440,7 @@ void DbDumper::iterates(kvstore::RocksPrefixIter* it) { auto key = it->key(); auto value = it->val(); - if (NebulaKeyUtils::isVertex(spaceVidLen_, key)) { + if (NebulaKeyUtils::isTag(spaceVidLen_, key)) { // filter the data bool isFiltered = false; for (auto& cb : beforePrintVertex_) { diff --git a/src/tools/db-upgrade/DbUpgrader.cpp b/src/tools/db-upgrade/DbUpgrader.cpp index 3628cb7474f..76b8019fece 100644 --- a/src/tools/db-upgrade/DbUpgrader.cpp +++ b/src/tools/db-upgrade/DbUpgrader.cpp @@ -269,7 +269,7 @@ void UpgraderSpace::runPartV1() { auto strVid = std::string(reinterpret_cast(&vId), sizeof(vId)); auto newTagSchema = it->second.back().get(); // Generate 2.0 key - auto newKey = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, strVid, tagId); + auto newKey = NebulaKeyUtils::tagKey(spaceVidLen_, 
partId, strVid, tagId); auto val = iter->val(); auto reader = RowReaderWrapper::getTagPropReader(schemaMan_, spaceId_, tagId, val); if (!reader) { @@ -482,7 +482,7 @@ void UpgraderSpace::runPartV2() { auto newTagSchema = it->second.back().get(); // Generate 2.0 key - auto newKey = NebulaKeyUtils::vertexKey(spaceVidLen_, partId, vId, tagId); + auto newKey = NebulaKeyUtils::tagKey(spaceVidLen_, partId, vId, tagId); auto val = iter->val(); auto reader = RowReaderWrapper::getTagPropReader(schemaMan_, spaceId_, tagId, val); if (!reader) { diff --git a/src/tools/db-upgrade/NebulaKeyUtilsV2.cpp b/src/tools/db-upgrade/NebulaKeyUtilsV2.cpp index 4891adb3f98..8a1a0f429c6 100644 --- a/src/tools/db-upgrade/NebulaKeyUtilsV2.cpp +++ b/src/tools/db-upgrade/NebulaKeyUtilsV2.cpp @@ -16,7 +16,7 @@ bool NebulaKeyUtilsV2::isValidVidLen(size_t vIdLen, VertexID srcVId, VertexID ds } // static -std::string NebulaKeyUtilsV2::vertexKey( +std::string NebulaKeyUtilsV2::tagKey( size_t vIdLen, PartitionID partId, VertexID vId, TagID tagId, TagVersion tv) { CHECK_GE(vIdLen, vId.size()); tagId &= kTagMaskSet; diff --git a/src/tools/db-upgrade/NebulaKeyUtilsV2.h b/src/tools/db-upgrade/NebulaKeyUtilsV2.h index 1f7a79b6556..533e448ac0f 100644 --- a/src/tools/db-upgrade/NebulaKeyUtilsV2.h +++ b/src/tools/db-upgrade/NebulaKeyUtilsV2.h @@ -50,9 +50,9 @@ class NebulaKeyUtilsV2 final { static bool isValidVidLen(size_t vIdLen, VertexID srcvId, VertexID dstvId = ""); /** - * Generate vertex key for kv store + * Generate tag key for kv store * */ - static std::string vertexKey( + static std::string tagKey( size_t vIdLen, PartitionID partId, VertexID vId, TagID tagId, TagVersion tv); /** From 9ebc49d5bb67a3643d83655492c9553a9624eaf2 Mon Sep 17 00:00:00 2001 From: Doodle <13706157+critical27@users.noreply.github.com> Date: Wed, 17 Nov 2021 19:41:24 -0600 Subject: [PATCH 23/53] [Raft] make me crazy (#3172) * cherry * minor log changes, fix a part is reset more than expected during waiting for snapshot * 
optimize when rpc timeout * * no more weird "has been sended" and "self->lastLogIdSent_ = self->logIdToSend_ - 1;" * do not send pending wals when leader is sending a snapshot * unify E_LOG_GAP and E_LOG_STALE * wal_not_found * * getPendingReqIfAny * unify SUCCEEDED E_LOG_GAP E_LOG_STALE * election don't wait forever * handle elelction resp in worker thread * fix deadlock * resolve conflicts, simplify, add a bit more comments * fix pytest * fix pytest again Co-authored-by: yaphet <4414314+darionyaphet@users.noreply.github.com> --- conf/nebula-storaged.conf.production | 2 +- src/common/base/SlowOpTracker.cpp | 2 +- src/interface/raftex.thrift | 40 ++- src/kvstore/NebulaSnapshotManager.cpp | 4 +- src/kvstore/raftex/Host.cpp | 345 +++++++++------------- src/kvstore/raftex/Host.h | 23 +- src/kvstore/raftex/RaftPart.cpp | 307 ++++++++++--------- src/kvstore/raftex/RaftPart.h | 10 +- src/kvstore/raftex/SnapshotManager.cpp | 11 +- src/kvstore/raftex/SnapshotManager.h | 3 +- src/kvstore/raftex/test/CMakeLists.txt | 15 - src/kvstore/raftex/test/SnapshotTest.cpp | 100 ------- src/kvstore/wal/FileBasedWal.cpp | 14 +- src/kvstore/wal/FileBasedWal.h | 3 + src/kvstore/wal/Wal.h | 3 + src/kvstore/wal/WalFileIterator.cpp | 4 +- src/kvstore/wal/test/FileBasedWalTest.cpp | 34 +++ tests/admin/test_configs.py | 4 +- 18 files changed, 385 insertions(+), 539 deletions(-) delete mode 100644 src/kvstore/raftex/test/SnapshotTest.cpp diff --git a/conf/nebula-storaged.conf.production b/conf/nebula-storaged.conf.production index f41a16b7d21..8789ebdd08a 100644 --- a/conf/nebula-storaged.conf.production +++ b/conf/nebula-storaged.conf.production @@ -102,7 +102,7 @@ --enable_rocksdb_whole_key_filtering=false ############### misc #################### ---snapshot_part_rate_limit=8388608 +--snapshot_part_rate_limit=10485760 --snapshot_batch_size=1048576 --rebuild_index_part_rate_limit=4194304 --rebuild_index_batch_size=1048576 diff --git a/src/common/base/SlowOpTracker.cpp 
b/src/common/base/SlowOpTracker.cpp index 5a81c7db52d..debdaa048c2 100644 --- a/src/common/base/SlowOpTracker.cpp +++ b/src/common/base/SlowOpTracker.cpp @@ -8,4 +8,4 @@ #include "common/base/Base.h" #include "common/time/WallClock.h" -DEFINE_int64(slow_op_threshhold_ms, 50, "default threshhold for slow operation"); +DEFINE_int64(slow_op_threshhold_ms, 100, "default threshhold for slow operation"); diff --git a/src/interface/raftex.thrift b/src/interface/raftex.thrift index 20972b82af7..d59bd568ff6 100644 --- a/src/interface/raftex.thrift +++ b/src/interface/raftex.thrift @@ -10,29 +10,27 @@ cpp_include "common/thrift/ThriftTypes.h" enum ErrorCode { SUCCEEDED = 0; - E_LOG_GAP = -1; - E_LOG_STALE = -2; - E_MISSING_COMMIT = -3; - E_WAITING_SNAPSHOT = -4; // The follower is waiting a snapshot - - E_UNKNOWN_PART = -5; - E_TERM_OUT_OF_DATE = -6; - E_LAST_LOG_TERM_TOO_OLD = -7; - E_BAD_STATE = -8; - E_WRONG_LEADER = -9; - E_WAL_FAIL = -10; - E_NOT_READY = -11; + E_UNKNOWN_PART = -1; - // Local errors - E_HOST_STOPPED = -12; - E_NOT_A_LEADER = -13; - E_HOST_DISCONNECTED = -14; - E_TOO_MANY_REQUESTS = -15; - E_PERSIST_SNAPSHOT_FAILED = -16; + // Raft consensus errors + E_LOG_GAP = -2; + E_LOG_STALE = -3; + E_TERM_OUT_OF_DATE = -4; - E_BAD_ROLE = -17, + // Raft state errors + E_WAITING_SNAPSHOT = -5; // The follower is waiting a snapshot + E_BAD_STATE = -6; + E_WRONG_LEADER = -7; + E_NOT_READY = -8; + E_BAD_ROLE = -9, - E_EXCEPTION = -20; // An thrift internal exception was thrown + // Local errors + E_WAL_FAIL = -10; + E_HOST_STOPPED = -11; + E_TOO_MANY_REQUESTS = -12; + E_PERSIST_SNAPSHOT_FAILED = -13; + E_RPC_EXCEPTION = -14; // An thrift internal exception was thrown + E_NO_WAL_FOUND = -15; } typedef i64 (cpp.type = "nebula::ClusterID") ClusterID @@ -103,8 +101,6 @@ struct AppendLogRequest { // 10: TermID log_term; 11: list log_str_list; - - 12: bool sending_snapshot; } diff --git a/src/kvstore/NebulaSnapshotManager.cpp b/src/kvstore/NebulaSnapshotManager.cpp index 
6a2fd3ec8fc..f9401ca04fb 100644 --- a/src/kvstore/NebulaSnapshotManager.cpp +++ b/src/kvstore/NebulaSnapshotManager.cpp @@ -10,7 +10,7 @@ #include "kvstore/RateLimiter.h" DEFINE_uint32(snapshot_part_rate_limit, - 1024 * 1024 * 8, + 1024 * 1024 * 10, "max bytes of pulling snapshot for each partition in one second"); DEFINE_uint32(snapshot_batch_size, 1024 * 512, "batch size for snapshot, in bytes"); @@ -21,7 +21,7 @@ const int32_t kReserveNum = 1024 * 4; NebulaSnapshotManager::NebulaSnapshotManager(NebulaStore* kv) : store_(kv) { // Snapshot rate is limited to FLAGS_snapshot_worker_threads * FLAGS_snapshot_part_rate_limit. - // So by default, the total send rate is limited to 4 * 8Mb = 32Mb. + // So by default, the total send rate is limited to 4 * 10Mb = 40Mb. LOG(INFO) << "Send snapshot is rate limited to " << FLAGS_snapshot_part_rate_limit << " for each part by default"; } diff --git a/src/kvstore/raftex/Host.cpp b/src/kvstore/raftex/Host.cpp index 20d8a400050..ab79d2b9195 100644 --- a/src/kvstore/raftex/Host.cpp +++ b/src/kvstore/raftex/Host.cpp @@ -50,11 +50,6 @@ cpp2::ErrorCode Host::checkStatus() const { return cpp2::ErrorCode::E_HOST_STOPPED; } - if (paused_) { - VLOG(2) << idStr_ << "The host is paused, due to losing leadership"; - return cpp2::ErrorCode::E_NOT_A_LEADER; - } - return cpp2::ErrorCode::SUCCEEDED; } @@ -70,8 +65,7 @@ folly::Future Host::askForVote(const cpp2::AskForVoteR return resp; } } - auto client = - part_->clientMan_->client(addr_, eb, false, FLAGS_raft_heartbeat_interval_secs * 1000); + auto client = part_->clientMan_->client(addr_, eb, false, FLAGS_raft_rpc_timeout_ms); return client->future_askForVote(req); } @@ -89,23 +83,18 @@ folly::Future Host::appendLogs(folly::EventBase* eb, std::lock_guard g(lock_); auto res = checkStatus(); - if (logId <= lastLogIdSent_) { - LOG(INFO) << idStr_ << "The log " << logId << " has been sended" - << ", lastLogIdSent " << lastLogIdSent_; - cpp2::AppendLogResponse r; - 
r.set_error_code(cpp2::ErrorCode::SUCCEEDED); - return r; - } - if (requestOnGoing_ && res == cpp2::ErrorCode::SUCCEEDED) { + if (UNLIKELY(sendingSnapshot_)) { + LOG_EVERY_N(INFO, 500) << idStr_ << "The target host is waiting for a snapshot"; + res = cpp2::ErrorCode::E_WAITING_SNAPSHOT; + } else if (requestOnGoing_) { + // buffer incoming request to pendingReq_ if (cachingPromise_.size() <= FLAGS_max_outstanding_requests) { pendingReq_ = std::make_tuple(term, logId, committedLogId); return cachingPromise_.getFuture(); } else { LOG_EVERY_N(INFO, 200) << idStr_ << "Too many requests are waiting, return error"; - cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::E_TOO_MANY_REQUESTS); - return r; + res = cpp2::ErrorCode::E_TOO_MANY_REQUESTS; } } @@ -129,14 +118,23 @@ folly::Future Host::appendLogs(folly::EventBase* eb, logTermToSend_ = term; logIdToSend_ = logId; committedLogId_ = committedLogId; - pendingReq_ = std::make_tuple(0, 0, 0); - promise_ = std::move(cachingPromise_); - cachingPromise_ = folly::SharedPromise(); - ret = promise_.getFuture(); - - requestOnGoing_ = true; - req = prepareAppendLogRequest(); + auto result = prepareAppendLogRequest(); + if (ok(result)) { + LOG_IF(INFO, FLAGS_trace_raft) << idStr_ << "Sending the pending request in the queue" + << ", from " << lastLogIdSent_ + 1 << " to " << logIdToSend_; + req = std::move(value(result)); + pendingReq_ = std::make_tuple(0, 0, 0); + promise_ = std::move(cachingPromise_); + cachingPromise_ = folly::SharedPromise(); + ret = promise_.getFuture(); + requestOnGoing_ = true; + } else { + // target host is waiting for a snapshot or wal not found + cpp2::AppendLogResponse r; + r.set_error_code(error(result)); + return r; + } } // Get a new promise @@ -152,6 +150,7 @@ void Host::setResponse(const cpp2::AppendLogResponse& r) { cachingPromise_ = folly::SharedPromise(); pendingReq_ = std::make_tuple(0, 0, 0); requestOnGoing_ = false; + noMoreRequestCV_.notify_all(); } void 
Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr req) { @@ -167,7 +166,9 @@ void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptrlastLogIdSent_ << ", lastLogTermSent_ " << self->lastLogTermSent_; switch (resp.get_error_code()) { - case cpp2::ErrorCode::SUCCEEDED: { + case cpp2::ErrorCode::SUCCEEDED: + case cpp2::ErrorCode::E_LOG_GAP: + case cpp2::ErrorCode::E_LOG_STALE: { VLOG(2) << self->idStr_ << "AppendLog request sent successfully"; std::shared_ptr newReq; @@ -175,161 +176,44 @@ void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr g(self->lock_); auto res = self->checkStatus(); if (res != cpp2::ErrorCode::SUCCEEDED) { - VLOG(2) << self->idStr_ - << "The host is not in a proper status," - " just return"; - cpp2::AppendLogResponse r; - r.set_error_code(res); - self->setResponse(r); - } else if (self->lastLogIdSent_ >= resp.get_last_log_id()) { - VLOG(2) << self->idStr_ << "We send nothing in the last request" - << ", so we don't send the same logs again"; - self->followerCommittedLogId_ = resp.get_committed_log_id(); cpp2::AppendLogResponse r; r.set_error_code(res); self->setResponse(r); - } else { - self->lastLogIdSent_ = resp.get_last_log_id(); - self->lastLogTermSent_ = resp.get_last_log_term(); - self->followerCommittedLogId_ = resp.get_committed_log_id(); - if (self->lastLogIdSent_ < self->logIdToSend_) { - // More to send - VLOG(2) << self->idStr_ << "There are more logs to send"; - newReq = self->prepareAppendLogRequest(); - } else { - VLOG(2) << self->idStr_ - << "Fulfill the promise, size = " << self->promise_.size(); - // Fulfill the promise - self->promise_.setValue(resp); - - if (self->noRequest()) { - VLOG(2) << self->idStr_ << "No request any more!"; - self->requestOnGoing_ = false; - } else { - auto& tup = self->pendingReq_; - self->logTermToSend_ = std::get<0>(tup); - self->logIdToSend_ = std::get<1>(tup); - self->committedLogId_ = std::get<2>(tup); - VLOG(2) << self->idStr_ << "Sending the pending 
request in the queue" - << ", from " << self->lastLogIdSent_ + 1 << " to " - << self->logIdToSend_; - newReq = self->prepareAppendLogRequest(); - self->promise_ = std::move(self->cachingPromise_); - self->cachingPromise_ = folly::SharedPromise(); - self->pendingReq_ = std::make_tuple(0, 0, 0); - } // self->noRequest() - } // self->lastLogIdSent_ < self->logIdToSend_ - } // else - } - if (newReq) { - self->appendLogsInternal(eb, newReq); - } else { - self->noMoreRequestCV_.notify_all(); - } - return; - } - case cpp2::ErrorCode::E_LOG_GAP: { - VLOG(2) << self->idStr_ << "The host's log is behind, need to catch up"; - std::shared_ptr newReq; - { - std::lock_guard g(self->lock_); - auto res = self->checkStatus(); - if (res != cpp2::ErrorCode::SUCCEEDED) { - VLOG(2) << self->idStr_ - << "The host is not in a proper status," - " skip catching up the gap"; - cpp2::AppendLogResponse r; - r.set_error_code(res); - self->setResponse(r); - } else if (self->lastLogIdSent_ == resp.get_last_log_id()) { - VLOG(2) << self->idStr_ << "We send nothing in the last request" - << ", so we don't send the same logs again"; - self->lastLogIdSent_ = resp.get_last_log_id(); - self->lastLogTermSent_ = resp.get_last_log_term(); - self->followerCommittedLogId_ = resp.get_committed_log_id(); - cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::SUCCEEDED); - self->setResponse(r); - } else { - self->lastLogIdSent_ = std::min(resp.get_last_log_id(), self->logIdToSend_ - 1); - self->lastLogTermSent_ = resp.get_last_log_term(); - self->followerCommittedLogId_ = resp.get_committed_log_id(); - newReq = self->prepareAppendLogRequest(); + return; } - } - if (newReq) { - self->appendLogsInternal(eb, newReq); - } else { - self->noMoreRequestCV_.notify_all(); - } - return; - } - case cpp2::ErrorCode::E_WAITING_SNAPSHOT: { - LOG(INFO) << self->idStr_ - << "The host is waiting for the snapshot, so we need to " - "send log from " - << "current committedLogId " << self->committedLogId_; - 
std::shared_ptr newReq; - { - std::lock_guard g(self->lock_); - auto res = self->checkStatus(); - if (res != cpp2::ErrorCode::SUCCEEDED) { - VLOG(2) << self->idStr_ - << "The host is not in a proper status," - " skip waiting the snapshot"; - cpp2::AppendLogResponse r; - r.set_error_code(res); - self->setResponse(r); - } else { - self->lastLogIdSent_ = self->committedLogId_; - self->lastLogTermSent_ = self->logTermToSend_; - self->followerCommittedLogId_ = resp.get_committed_log_id(); - newReq = self->prepareAppendLogRequest(); - } - } - if (newReq) { - self->appendLogsInternal(eb, newReq); - } else { - self->noMoreRequestCV_.notify_all(); - } - return; - } - case cpp2::ErrorCode::E_LOG_STALE: { - VLOG(2) << self->idStr_ << "Log stale, reset lastLogIdSent " << self->lastLogIdSent_ - << " to the followers lastLodId " << resp.get_last_log_id(); - std::shared_ptr newReq; - { - std::lock_guard g(self->lock_); - auto res = self->checkStatus(); - if (res != cpp2::ErrorCode::SUCCEEDED) { - VLOG(2) << self->idStr_ - << "The host is not in a proper status," - " skip waiting the snapshot"; - cpp2::AppendLogResponse r; - r.set_error_code(res); - self->setResponse(r); - } else if (self->logIdToSend_ <= resp.get_last_log_id()) { - VLOG(2) << self->idStr_ << "It means the request has been received by follower"; - self->lastLogIdSent_ = self->logIdToSend_ - 1; - self->lastLogTermSent_ = resp.get_last_log_term(); - self->followerCommittedLogId_ = resp.get_committed_log_id(); - cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::SUCCEEDED); - self->setResponse(r); + // Host is working + self->lastLogIdSent_ = resp.get_last_log_id(); + self->lastLogTermSent_ = resp.get_last_log_term(); + self->followerCommittedLogId_ = resp.get_committed_log_id(); + if (self->lastLogIdSent_ < self->logIdToSend_) { + // More to send + VLOG(2) << self->idStr_ << "There are more logs to send"; + auto result = self->prepareAppendLogRequest(); + if (ok(result)) { + newReq = 
std::move(value(result)); + } else { + cpp2::AppendLogResponse r; + r.set_error_code(error(result)); + self->setResponse(r); + return; + } } else { - self->lastLogIdSent_ = std::min(resp.get_last_log_id(), self->logIdToSend_ - 1); - self->lastLogTermSent_ = resp.get_last_log_term(); - self->followerCommittedLogId_ = resp.get_committed_log_id(); - newReq = self->prepareAppendLogRequest(); + // resp.get_last_log_id() >= self->logIdToSend_ + // All logs up to logIdToSend_ have been sent, fulfill the promise + self->promise_.setValue(resp); + // Check if there are any pending request: + // Either send pending request if any, or set Host to vacant + newReq = self->getPendingReqIfAny(self); } } if (newReq) { self->appendLogsInternal(eb, newReq); } return; } + // Usually the peer is not in proper state, for example: + // E_UNKNOWN_PART/E_BAD_STATE/E_NOT_READY/E_WAITING_SNAPSHOT + // In this case, nothing changed, just return the error default: { LOG_EVERY_N(ERROR, 100) << self->idStr_ << "Failed to append logs to the host (Err: " @@ -337,9 +221,7 @@ void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptr g(self->lock_); self->setResponse(resp); - self->lastLogIdSent_ = self->logIdToSend_ - 1; } - self->noMoreRequestCV_.notify_all(); return; } } @@ -348,66 +230,63 @@ void Host::appendLogsInternal(folly::EventBase* eb, std::shared_ptridStr_ << ex.what(); cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); + r.set_error_code(cpp2::ErrorCode::E_RPC_EXCEPTION); { std::lock_guard g(self->lock_); if (ex.getType() == TransportException::TIMED_OUT) { - VLOG(2) << self->idStr_ << "append log time out" - << ", space " << req->get_space() << ", part " << req->get_part() - << ", current term " << req->get_current_term() << ", last_log_id " - << req->get_last_log_id() << ", committed_id " - << req->get_committed_log_id() << ", last_log_term_sent" - << req->get_last_log_term_sent() << ", 
last_log_id_sent " - << req->get_last_log_id_sent() << ", logs size " - << req->get_log_str_list().size(); + LOG_IF(INFO, FLAGS_trace_raft) + << self->idStr_ << "append log time out" + << ", space " << req->get_space() << ", part " << req->get_part() + << ", current term " << req->get_current_term() << ", last_log_id " + << req->get_last_log_id() << ", committed_id " + << req->get_committed_log_id() << ", last_log_term_sent " + << req->get_last_log_term_sent() << ", last_log_id_sent " + << req->get_last_log_id_sent() << ", set lastLogIdSent_ to logIdToSend_ " + << self->logIdToSend_ << ", logs size " + << req->get_log_str_list().size(); } self->setResponse(r); - self->lastLogIdSent_ = self->logIdToSend_ - 1; } // a new raft log or heartbeat will trigger another appendLogs in Host - self->noMoreRequestCV_.notify_all(); return; }) .thenError(folly::tag_t{}, [self = shared_from_this()](std::exception&& ex) { VLOG(2) << self->idStr_ << ex.what(); cpp2::AppendLogResponse r; - r.set_error_code(cpp2::ErrorCode::E_EXCEPTION); + r.set_error_code(cpp2::ErrorCode::E_RPC_EXCEPTION); { std::lock_guard g(self->lock_); self->setResponse(r); - self->lastLogIdSent_ = self->logIdToSend_ - 1; } // a new raft log or heartbeat will trigger another appendLogs in Host - self->noMoreRequestCV_.notify_all(); return; }); } -std::shared_ptr Host::prepareAppendLogRequest() { +ErrorOr> Host::prepareAppendLogRequest() { CHECK(!lock_.try_lock()); - auto req = std::make_shared(); - req->set_space(part_->spaceId()); - req->set_part(part_->partitionId()); - req->set_current_term(logTermToSend_); - req->set_last_log_id(logIdToSend_); - req->set_leader_addr(part_->address().host); - req->set_leader_port(part_->address().port); - req->set_committed_log_id(committedLogId_); - req->set_last_log_term_sent(lastLogTermSent_); - req->set_last_log_id_sent(lastLogIdSent_); - VLOG(2) << idStr_ << "Prepare AppendLogs request from Log " << lastLogIdSent_ + 1 << " to " << logIdToSend_; if (lastLogIdSent_ + 1 > 
part_->wal()->lastLogId()) { - LOG(INFO) << idStr_ << "My lastLogId in wal is " << part_->wal()->lastLogId() - << ", but you are seeking " << lastLogIdSent_ + 1 << ", so i have nothing to send."; - return req; + LOG_IF(INFO, FLAGS_trace_raft) + << idStr_ << "My lastLogId in wal is " << part_->wal()->lastLogId() + << ", but you are seeking " << lastLogIdSent_ + 1 + << ", so i have nothing to send, logIdToSend_ = " << logIdToSend_; + return cpp2::ErrorCode::E_NO_WAL_FOUND; } auto it = part_->wal()->iterator(lastLogIdSent_ + 1, logIdToSend_); if (it->valid()) { - VLOG(2) << idStr_ << "Prepare the list of log entries to send"; - auto term = it->logTerm(); + auto req = std::make_shared(); + req->set_space(part_->spaceId()); + req->set_part(part_->partitionId()); + req->set_current_term(logTermToSend_); + req->set_last_log_id(logIdToSend_); + req->set_leader_addr(part_->address().host); + req->set_leader_port(part_->address().port); + req->set_committed_log_id(committedLogId_); + req->set_last_log_term_sent(lastLogTermSent_); + req->set_last_log_id_sent(lastLogIdSent_); req->set_log_term(term); std::vector logs; @@ -420,9 +299,8 @@ std::shared_ptr Host::prepareAppendLogRequest() { logs.emplace_back(std::move(le)); } req->set_log_str_list(std::move(logs)); - req->set_sending_snapshot(false); + return req; } else { - req->set_sending_snapshot(true); if (!sendingSnapshot_) { LOG(INFO) << idStr_ << "Can't find log " << lastLogIdSent_ + 1 << " in wal, send the snapshot" << ", logIdToSend = " << logIdToSend_ @@ -430,21 +308,28 @@ std::shared_ptr Host::prepareAppendLogRequest() { << ", lastLogId in wal = " << part_->wal()->lastLogId(); sendingSnapshot_ = true; part_->snapshot_->sendSnapshot(part_, addr_) - .thenValue([self = shared_from_this()](Status&& status) { + .thenValue([self = shared_from_this()](auto&& status) { + std::lock_guard g(self->lock_); if (status.ok()) { - LOG(INFO) << self->idStr_ << "Send snapshot succeeded!"; + auto commitLogIdAndTerm = status.value(); + 
self->lastLogIdSent_ = commitLogIdAndTerm.first; + self->lastLogTermSent_ = commitLogIdAndTerm.second; + self->followerCommittedLogId_ = commitLogIdAndTerm.first; + LOG(INFO) << self->idStr_ << "Send snapshot succeeded!" + << " commitLogId = " << commitLogIdAndTerm.first + << " commitLogTerm = " << commitLogIdAndTerm.second; } else { LOG(INFO) << self->idStr_ << "Send snapshot failed!"; // TODO(heng): we should tell the follower i am failed. } self->sendingSnapshot_ = false; + self->noMoreRequestCV_.notify_all(); }); } else { - LOG_EVERY_N(INFO, 30) << idStr_ << "The snapshot req is in queue, please wait for a moment"; + LOG_EVERY_N(INFO, 100) << idStr_ << "The snapshot req is in queue, please wait for a moment"; } + return cpp2::ErrorCode::E_WAITING_SNAPSHOT; } - - return req; } folly::Future Host::sendAppendLogRequest( @@ -466,9 +351,10 @@ folly::Future Host::sendAppendLogRequest( << ", part " << req->get_part() << ", current term " << req->get_current_term() << ", last_log_id " << req->get_last_log_id() << ", committed_id " - << req->get_committed_log_id() << ", last_log_term_sent" + << req->get_committed_log_id() << ", last_log_term_sent " << req->get_last_log_term_sent() << ", last_log_id_sent " - << req->get_last_log_id_sent(); + << req->get_last_log_id_sent() << ", logs in request " + << req->get_log_str_list().size(); // Get client connection auto client = part_->clientMan_->client(addr_, eb, false, FLAGS_raft_rpc_timeout_ms); return client->future_appendLog(*req); @@ -499,7 +385,7 @@ folly::Future Host::sendHeartbeat(folly::EventBase* eb, VLOG(3) << self->idStr_ << "heartbeat call got response"; if (t.hasException()) { cpp2::HeartbeatResponse resp; - resp.set_error_code(cpp2::ErrorCode::E_EXCEPTION); + resp.set_error_code(cpp2::ErrorCode::E_RPC_EXCEPTION); pro.setValue(std::move(resp)); return; } else { @@ -542,5 +428,40 @@ bool Host::noRequest() const { return pendingReq_ == emptyTup; } +std::shared_ptr Host::getPendingReqIfAny(std::shared_ptr self) { + 
CHECK(!self->lock_.try_lock()); + CHECK(self->requestOnGoing_) << self->idStr_; + + // Check if there are any pending request to send + if (self->noRequest()) { + self->noMoreRequestCV_.notify_all(); + self->requestOnGoing_ = false; + return nullptr; + } + + // there is pending request + auto& tup = self->pendingReq_; + self->logTermToSend_ = std::get<0>(tup); + self->logIdToSend_ = std::get<1>(tup); + self->committedLogId_ = std::get<2>(tup); + + LOG_IF(INFO, FLAGS_trace_raft) << self->idStr_ << "Sending the pending request in the queue" + << ", from " << self->lastLogIdSent_ + 1 << " to " + << self->logIdToSend_; + self->pendingReq_ = std::make_tuple(0, 0, 0); + self->promise_ = std::move(self->cachingPromise_); + self->cachingPromise_ = folly::SharedPromise(); + + auto result = self->prepareAppendLogRequest(); + if (ok(result)) { + return value(result); + } else { + cpp2::AppendLogResponse r; + r.set_error_code(error(result)); + self->setResponse(r); + return nullptr; + } +} + } // namespace raftex } // namespace nebula diff --git a/src/kvstore/raftex/Host.h b/src/kvstore/raftex/Host.h index 3cc23ef0ad8..db52bee54ec 100644 --- a/src/kvstore/raftex/Host.h +++ b/src/kvstore/raftex/Host.h @@ -9,6 +9,7 @@ #include #include "common/base/Base.h" +#include "common/base/ErrorOr.h" #include "common/thrift/ThriftClientManager.h" #include "interface/gen-cpp2/RaftexServiceAsyncClient.h" #include "interface/gen-cpp2/raftex_types.h" @@ -32,18 +33,6 @@ class Host final : public std::enable_shared_from_this { const char* idStr() const { return idStr_.c_str(); } - // This will be called when the shard lost its leadership - void pause() { - std::lock_guard g(lock_); - paused_ = true; - } - - // This will be called when the shard becomes the leader - void resume() { - std::lock_guard g(lock_); - paused_ = false; - } - void stop() { std::lock_guard g(lock_); stopped_ = true; @@ -99,12 +88,14 @@ class Host final : public std::enable_shared_from_this { folly::Future 
sendHeartbeatRequest( folly::EventBase* eb, std::shared_ptr req); - std::shared_ptr prepareAppendLogRequest(); + ErrorOr> prepareAppendLogRequest(); bool noRequest() const; void setResponse(const cpp2::AppendLogResponse& r); + std::shared_ptr getPendingReqIfAny(std::shared_ptr self); + private: // using Request = std::tuple; @@ -116,10 +107,13 @@ class Host final : public std::enable_shared_from_this { mutable std::mutex lock_; - bool paused_{false}; bool stopped_{false}; + // whether there is a batch of logs for target host in on going bool requestOnGoing_{false}; + // whether there is a snapshot for target host in on going + bool sendingSnapshot_{false}; + std::condition_variable noMoreRequestCV_; folly::SharedPromise promise_; folly::SharedPromise cachingPromise_; @@ -135,7 +129,6 @@ class Host final : public std::enable_shared_from_this { TermID lastLogTermSent_{0}; LogID committedLogId_{0}; - std::atomic_bool sendingSnapshot_{false}; // CommittedLogId of follower LogID followerCommittedLogId_{0}; diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index 9e2db03be09..99dead9f85b 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -75,6 +75,8 @@ class AppendLogsIterator final : public LogIterator { LogID firstLogId() const { return firstLogId_; } + LogID lastLogId() const { return firstLogId_ + logs_.size() - 1; } + // Return true if the current log is a AtomicOp, otherwise return false bool processAtomicOp() { while (idx_ < logs_.size()) { @@ -305,7 +307,7 @@ void RaftPart::stop() { decltype(hosts_) hosts; { - std::unique_lock lck(raftLock_); + std::lock_guard lck(raftLock_); status_ = Status::STOPPED; leader_ = {"", 0}; role_ = Role::FOLLOWER; @@ -378,11 +380,11 @@ void RaftPart::preProcessTransLeader(const HostAddr& target) { LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!"; bgWorkers_->addTask([self = shared_from_this()] { { - std::unique_lock lck(self->raftLock_); + 
std::lock_guard lck(self->raftLock_); self->role_ = Role::CANDIDATE; self->leader_ = HostAddr("", 0); } - self->leaderElection(); + self->leaderElection().get(); }); } break; @@ -762,7 +764,8 @@ void RaftPart::replicateLogs(folly::EventBase* eb, return; } - VLOG(2) << idStr_ << "About to replicate logs to all peer hosts"; + LOG_IF(INFO, FLAGS_trace_raft) << idStr_ << "About to replicate logs in range [" + << iter.firstLogId() << ", " << lastLogId << "] to all peer hosts"; lastMsgSentDur_.reset(); SlowOpTracker tracker; @@ -973,21 +976,6 @@ bool RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req, return false; } - if (UNLIKELY(status_ == Status::STOPPED)) { - VLOG(2) << idStr_ << "The part has been stopped, skip the request"; - return false; - } - - if (UNLIKELY(status_ == Status::STARTING)) { - VLOG(2) << idStr_ << "The partition is still starting"; - return false; - } - - if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { - VLOG(2) << idStr_ << "The partition is still waiting snapshot"; - return false; - } - // Make sure the role is still CANDIDATE if (role_ != Role::CANDIDATE) { VLOG(2) << idStr_ << "A leader has been elected"; @@ -1027,7 +1015,7 @@ typename RaftPart::Role RaftPart::processElectionResponses( } if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { - LOG(INFO) << idStr_ << "The partition is still waitiong snapshot"; + LOG(INFO) << idStr_ << "The partition is still waiting snapshot"; return role_; } @@ -1065,15 +1053,15 @@ typename RaftPart::Role RaftPart::processElectionResponses( return role_; } -bool RaftPart::leaderElection() { +folly::Future RaftPart::leaderElection() { VLOG(2) << idStr_ << "Start leader election..."; using namespace folly; // NOLINT since the fancy overload of | operator bool expected = false; + if (!inElection_.compare_exchange_strong(expected, true)) { - return true; + return false; } - SCOPE_EXIT { inElection_ = false; }; cpp2::AskForVoteRequest voteReq; decltype(hosts_) hosts; @@ -1088,6 +1076,7 @@ bool 
RaftPart::leaderElection() { // So we neeed to go back to the follower state to avoid the case. std::lock_guard g(raftLock_); role_ = Role::FOLLOWER; + inElection_ = false; return false; } @@ -1102,53 +1091,66 @@ bool RaftPart::leaderElection() { auto proposedTerm = voteReq.get_term(); auto resps = ElectionResponses(); if (hosts.empty()) { - VLOG(2) << idStr_ << "No peer found, I will be the leader"; + auto ret = handleElectionResponses(resps, hosts, proposedTerm); + inElection_ = false; + return ret; } else { + folly::Promise promise; + auto future = promise.getFuture(); auto eb = ioThreadPool_->getEventBase(); - auto futures = collectNSucceeded( - gen::from(hosts) | gen::map([eb, self = shared_from_this(), &voteReq](auto& host) { - VLOG(2) << self->idStr_ << "Sending AskForVoteRequest to " << host->idStr(); - return via(eb, [&voteReq, &host, eb]() -> Future { - return host->askForVote(voteReq, eb); - }); - }) | gen::as(), + collectNSucceeded( + gen::from(hosts) | + gen::map([eb, self = shared_from_this(), voteReq](std::shared_ptr host) { + VLOG(2) << self->idStr_ << "Sending AskForVoteRequest to " << host->idStr(); + return via(eb, [voteReq, host, eb]() -> Future { + return host->askForVote(voteReq, eb); + }); + }) | + gen::as(), // Number of succeeded required quorum_, // Result evaluator [hosts](size_t idx, cpp2::AskForVoteResponse& resp) { return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED && !hosts[idx]->isLearner(); + }) + .via(executor_.get()) + .then([self = shared_from_this(), pro = std::move(promise), hosts, proposedTerm]( + auto&& t) mutable { + VLOG(2) << self->idStr_ + << "AskForVoteRequest has been sent to all peers, waiting for responses"; + CHECK(!t.hasException()); + pro.setValue(self->handleElectionResponses(t.value(), std::move(hosts), proposedTerm)); + self->inElection_ = false; }); - - VLOG(2) << idStr_ - << "AskForVoteRequest has been sent to all peers" - ", waiting for responses"; - futures.wait(); - CHECK(!futures.hasException()) - 
<< "Got exception -- " << futures.result().exception().what().toStdString(); - VLOG(2) << idStr_ << "Got AskForVote response back"; - - resps = std::move(futures).get(); + return future; } +} +bool RaftPart::handleElectionResponses(const ElectionResponses& resps, + const std::vector>& peers, + TermID proposedTerm) { // Process the responses - switch (processElectionResponses(resps, std::move(hosts), proposedTerm)) { + switch (processElectionResponses(resps, std::move(peers), proposedTerm)) { case Role::LEADER: { // Elected LOG(INFO) << idStr_ << "The partition is elected as the leader"; + std::vector> hosts; { std::lock_guard g(raftLock_); if (status_ == Status::RUNNING) { leader_ = addr_; - for (auto& host : hosts_) { - host->reset(); - } + hosts = hosts_; bgWorkers_->addTask( - [self = shared_from_this(), term = voteReq.get_term()] { self->onElected(term); }); + [self = shared_from_this(), proposedTerm] { self->onElected(proposedTerm); }); lastMsgAcceptedTime_ = 0; } weight_ = 1; commitInThisTerm_ = false; } + // reset host can't be executed with raftLock_, otherwise it may encounter deadlock + for (auto& host : hosts) { + host->reset(); + } sendHeartbeat(); return true; } @@ -1184,7 +1186,7 @@ void RaftPart::statusPolling(int64_t startTime) { } size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3; if (needToStartElection()) { - if (leaderElection()) { + if (leaderElection().get()) { VLOG(2) << idStr_ << "Stop the election"; } else { // No leader has been elected, need to continue @@ -1197,7 +1199,6 @@ void RaftPart::statusPolling(int64_t startTime) { sendHeartbeat(); } if (needToCleanupSnapshot()) { - LOG(INFO) << idStr_ << "Clean up the snapshot"; cleanupSnapshot(); } { @@ -1261,7 +1262,7 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { LOG(INFO) << idStr_ << "The partition is still waiting snapshot"; - resp.set_error_code(cpp2::ErrorCode::E_NOT_READY); + 
resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT); return; } @@ -1331,7 +1332,7 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, // Before change role from leader to follower, check the logs locally. if (role_ == Role::LEADER && wal_->lastLogId() > lastLogId_) { - LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId() + LOG(INFO) << idStr_ << "There are some logs up to " << wal_->lastLogId() << " i did not commit when i was leader, rollback to " << lastLogId_; wal_->rollbackToLog(lastLogId_); } @@ -1364,11 +1365,11 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, << ", lastLogTermSent = " << req.get_last_log_term_sent() << ", num_logs = " << req.get_log_str_list().size() << ", logTerm = " << req.get_log_term() - << ", sendingSnapshot = " << req.get_sending_snapshot() << ", local lastLogId = " << lastLogId_ << ", local lastLogTerm = " << lastLogTerm_ << ", local committedLogId = " << committedLogId_ - << ", local current term = " << term_; + << ", local current term = " << term_ + << ", wal lastLogId = " << wal_->lastLogId(); std::lock_guard g(raftLock_); resp.set_current_term(term_); @@ -1389,6 +1390,11 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, resp.set_error_code(cpp2::ErrorCode::E_NOT_READY); return; } + if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { + VLOG(2) << idStr_ << "The partition is waiting for snapshot"; + resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT); + return; + } // Check leadership cpp2::ErrorCode err = verifyLeader(req); if (err != cpp2::ErrorCode::SUCCEEDED) { @@ -1401,54 +1407,6 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, // Reset the timeout timer lastMsgRecvDur_.reset(); - if (req.get_sending_snapshot() && status_ != Status::WAITING_SNAPSHOT) { - LOG(INFO) << idStr_ << "Begin to wait for the snapshot" - << " " << req.get_committed_log_id(); - reset(); - status_ = Status::WAITING_SNAPSHOT; 
- resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT); - return; - } - - if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { - VLOG(2) << idStr_ << "The part is receiving snapshot," - << "so just accept the new wals, but don't commit them." - << "last_log_id_sent " << req.get_last_log_id_sent() << ", total log number " - << req.get_log_str_list().size(); - if (lastLogId_ > 0 && req.get_last_log_id_sent() > lastLogId_) { - // There is a gap - LOG(INFO) << idStr_ << "Local is missing logs from id " << lastLogId_ << ". Need to catch up"; - resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP); - return; - } - // TODO(heng): if we have 3 node, one is leader, one is wait snapshot and - // return success, the other is follower, but leader replica log to follow - // failed, How to deal with leader crash? At this time, no leader will be - // elected. - size_t numLogs = req.get_log_str_list().size(); - LogID firstId = req.get_last_log_id_sent() + 1; - - VLOG(2) << idStr_ << "Writing log [" << firstId << ", " << firstId + numLogs - 1 << "] to WAL"; - LogStrListIterator iter(firstId, req.get_log_term(), req.get_log_str_list()); - if (wal_->appendLogs(iter)) { - // When leader has been sending a snapshot already, sometimes it would - // send a request with empty log list, and lastLogId in wal may be 0 - // because of reset. - if (numLogs != 0) { - CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId()) << "First Id is " << firstId; - } - lastLogId_ = wal_->lastLogId(); - lastLogTerm_ = wal_->lastLogTerm(); - resp.set_last_log_id(lastLogId_); - resp.set_last_log_term(lastLogTerm_); - resp.set_error_code(cpp2::ErrorCode::SUCCEEDED); - } else { - LOG_EVERY_N(WARNING, 100) << idStr_ << "Failed to append logs to WAL"; - resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL); - } - return; - } - if (req.get_last_log_id_sent() < committedLogId_ && req.get_last_log_term_sent() <= term_) { LOG(INFO) << idStr_ << "Stale log! 
The log " << req.get_last_log_id_sent() << ", term " << req.get_last_log_term_sent() << " i had committed yet. My committedLogId is " @@ -1460,7 +1418,7 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, << ", the log term is " << req.get_last_log_term_sent() << ", but my committedLogId is " << committedLogId_ << ", my term is " << term_ << ", to make the cluster stable i will follow the high term" - << " candidate and clenaup my data"; + << " candidate and cleanup my data"; reset(); resp.set_committed_log_id(committedLogId_); resp.set_last_log_id(lastLogId_); @@ -1469,64 +1427,101 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, } // req.get_last_log_id_sent() >= committedLogId_ - if (lastLogTerm_ > 0 && req.get_last_log_term_sent() != lastLogTerm_) { - LOG(INFO) << idStr_ << "The local last log term is " << lastLogTerm_ - << ", which is different from the leader's prevLogTerm " - << req.get_last_log_term_sent() << ", the prevLogId is " << req.get_last_log_id_sent() - << ". So need to rollback to last committedLogId_ " << committedLogId_; - if (wal_->rollbackToLog(committedLogId_)) { - lastLogId_ = wal_->lastLogId(); - lastLogTerm_ = wal_->lastLogTerm(); - resp.set_last_log_id(lastLogId_); - resp.set_last_log_term(lastLogTerm_); - LOG(INFO) << idStr_ << "Rollback succeeded! lastLogId is " << lastLogId_ << ", logLogTerm is " - << lastLogTerm_ << ", committedLogId is " << committedLogId_ << ", term is " - << term_; - } - resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP); - return; + if (req.get_last_log_id_sent() == lastLogId_ && req.get_last_log_term_sent() == lastLogTerm_) { + // nothing to do + // just append log later } else if (req.get_last_log_id_sent() > lastLogId_) { // There is a gap LOG(INFO) << idStr_ << "Local is missing logs from id " << lastLogId_ << ". 
Need to catch up"; resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP); return; - } else if (req.get_last_log_id_sent() < lastLogId_) { - // TODO(doodle): This is a potential bug which would cause data not in - // consensus. In most case, we would hit this path when leader append logs - // to follower and timeout (leader would set lastLogIdSent_ = logIdToSend_ - - // 1 in Host). **But follower actually received it successfully**. Which - // will explain when leader retry to append these logs, the LOG belows is - // printed, and lastLogId_ == req.get_last_log_id_sent() + 1 in the LOG. - // - // In fact we should always rollback to req.get_last_log_id_sent(), and - // append the logs from leader (we can't make promise that the logs in range - // [req.get_last_log_id_sent() + 1, lastLogId_] is same with follower). - // However, this makes no difference in the above case. - LOG(INFO) << idStr_ << "Stale log! Local lastLogId " << lastLogId_ << ", lastLogTerm " - << lastLogTerm_ << ", lastLogIdSent " << req.get_last_log_id_sent() - << ", lastLogTermSent " << req.get_last_log_term_sent(); - resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE); - return; + } else { + // check the last log term is matched or not + int reqLastLogTerm = wal_->getLogTerm(req.get_last_log_id_sent()); + if (req.get_last_log_term_sent() != reqLastLogTerm) { + LOG(INFO) << idStr_ << "The local log term is " << reqLastLogTerm + << ", which is different from the leader's prevLogTerm " + << req.get_last_log_term_sent() << ", the prevLogId is " + << req.get_last_log_id_sent() << ". 
So ask leader to send logs from committedLogId " + << committedLogId_; + TermID committedLogTerm = wal_->getLogTerm(committedLogId_); + if (committedLogTerm > 0) { + resp.set_last_log_id(committedLogId_); + resp.set_last_log_term(committedLogTerm); + } + resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP); + return; + } } - // Append new logs + // request get_last_log_term_sent == wal[get_last_log_id_sent].log_term size_t numLogs = req.get_log_str_list().size(); LogID firstId = req.get_last_log_id_sent() + 1; - VLOG(2) << idStr_ << "Writing log [" << firstId << ", " << firstId + numLogs - 1 << "] to WAL"; - LogStrListIterator iter(firstId, req.get_log_term(), req.get_log_str_list()); - if (wal_->appendLogs(iter)) { - if (numLogs != 0) { - CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId()) << "First Id is " << firstId; + + size_t diffIndex = 0; + do { + // find the first id/term not match, rollback until it, and append the remaining wal + if (!(req.get_last_log_id_sent() == lastLogId_ && + req.get_last_log_term_sent() == lastLogTerm_)) { + // check the diff index in log, find the first log which term is not same as term in request + { + std::unique_ptr it = wal_->iterator(firstId, firstId + numLogs - 1); + for (size_t i = 0; i < numLogs && it->valid(); i++, ++(*it), diffIndex++) { + int logTerm = it->logTerm(); + if (req.get_log_term() != logTerm) { + break; + } + } + } + + // stale log + if (diffIndex == numLogs) { + // All logs have been received before + resp.set_last_log_id(firstId + numLogs - 1); + resp.set_last_log_term(req.get_log_term()); + // nothing to append, goto commit + break; + } + + // rollback the wal + if (wal_->rollbackToLog(firstId + diffIndex - 1)) { + lastLogId_ = wal_->lastLogId(); + lastLogTerm_ = wal_->lastLogTerm(); + LOG(INFO) << idStr_ << "Rollback succeeded! 
lastLogId is " << lastLogId_ + << ", logLogTerm is " << lastLogTerm_ << ", committedLogId is " << committedLogId_ + << ", logs in request " << numLogs << ", remaining logs after rollback " + << numLogs - diffIndex; + } else { + LOG(ERROR) << idStr_ << "Rollback fail! lastLogId is" << lastLogId_ << ", logLogTerm is " + << lastLogTerm_ << ", committedLogId is " << committedLogId_ + << ", rollback id is " << firstId + diffIndex - 1; + resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL); + return; + } + + // update msg + firstId = firstId + diffIndex; + numLogs = numLogs - diffIndex; } - lastLogId_ = wal_->lastLogId(); - lastLogTerm_ = wal_->lastLogTerm(); - resp.set_last_log_id(lastLogId_); - resp.set_last_log_term(lastLogTerm_); - } else { - LOG_EVERY_N(WARNING, 100) << idStr_ << "Failed to append logs to WAL"; - resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL); - return; - } + + // Append new logs + std::vector logEntries = std::vector( + std::make_move_iterator(req.get_log_str_list().begin() + diffIndex), + std::make_move_iterator(req.get_log_str_list().end())); + LogStrListIterator iter(firstId, req.get_log_term(), std::move(logEntries)); + if (wal_->appendLogs(iter)) { + if (numLogs != 0) { + CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId()) << "First Id is " << firstId; + } + lastLogId_ = wal_->lastLogId(); + lastLogTerm_ = wal_->lastLogTerm(); + resp.set_last_log_id(lastLogId_); + resp.set_last_log_term(lastLogTerm_); + } else { + resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL); + return; + } + } while (false); LogID lastLogIdCanCommit = std::min(lastLogId_, req.get_committed_log_id()); if (lastLogIdCanCommit > committedLogId_) { @@ -1738,14 +1733,12 @@ void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req, } if (req.get_done()) { committedLogId_ = req.get_committed_log_id(); - if (lastLogId_ < committedLogId_) { - lastLogId_ = committedLogId_; - lastLogTerm_ = req.get_committed_log_term(); - } - if (wal_->lastLogId() <= committedLogId_) 
{ - LOG(INFO) << idStr_ << "Reset invalid wal after snapshot received"; - wal_->reset(); - } + lastLogId_ = committedLogId_; + lastLogTerm_ = req.get_committed_log_term(); + term_ = proposedTerm_ = lastLogTerm_; + // there should be no wal after state converts to WAITING_SNAPSHOT, the RaftPart has been reset + DCHECK_EQ(wal_->firstLogId(), 0); + DCHECK_EQ(wal_->lastLogId(), 0); status_ = Status::RUNNING; LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_ << ", lastLodId " << lastLogId_ << ", lastLogTermId " << lastLogTerm_; diff --git a/src/kvstore/raftex/RaftPart.h b/src/kvstore/raftex/RaftPart.h index 437afdbaead..6454ee96d9b 100644 --- a/src/kvstore/raftex/RaftPart.h +++ b/src/kvstore/raftex/RaftPart.h @@ -344,8 +344,9 @@ class RaftPart : public std::enable_shared_from_this { void cleanupSnapshot(); // The method sends out AskForVote request - // It return true if a leader is elected, otherwise returns false - bool leaderElection(); + // Return true if a leader is elected (the leader could be self or others), + // otherwise returns false + folly::Future leaderElection(); // The method will fill up the request object and return TRUE // if the election should continue. 
Otherwise the method will @@ -353,6 +354,11 @@ class RaftPart : public std::enable_shared_from_this { bool prepareElectionRequest(cpp2::AskForVoteRequest& req, std::vector>& hosts); + // return true if elected as the leader, else return false + bool handleElectionResponses(const ElectionResponses& resps, + const std::vector>& hosts, + TermID proposedTerm); + // The method returns the partition's role after the election Role processElectionResponses(const ElectionResponses& results, std::vector> hosts, diff --git a/src/kvstore/raftex/SnapshotManager.cpp b/src/kvstore/raftex/SnapshotManager.cpp index 20f1f31715e..b6a3adc268f 100644 --- a/src/kvstore/raftex/SnapshotManager.cpp +++ b/src/kvstore/raftex/SnapshotManager.cpp @@ -26,9 +26,9 @@ SnapshotManager::SnapshotManager() { std::make_shared("snapshot-ioexecutor"))); } -folly::Future SnapshotManager::sendSnapshot(std::shared_ptr part, - const HostAddr& dst) { - folly::Promise p; +folly::Future>> SnapshotManager::sendSnapshot( + std::shared_ptr part, const HostAddr& dst) { + folly::Promise>> p; auto fut = p.getFuture(); executor_->add([this, p = std::move(p), part, dst]() mutable { auto spaceId = part->spaceId_; @@ -40,7 +40,7 @@ folly::Future SnapshotManager::sendSnapshot(std::shared_ptr pa auto commitLogIdAndTerm = part->lastCommittedLogId(); const auto& localhost = part->address(); std::vector> results; - LOG(INFO) << part->idStr_ << "Begin to send the snapshot" + LOG(INFO) << part->idStr_ << "Begin to send the snapshot to the host " << dst << ", commitLogId = " << commitLogIdAndTerm.first << ", commitLogTerm = " << commitLogIdAndTerm.second; accessAllRowsInSnapshot( @@ -77,7 +77,7 @@ folly::Future SnapshotManager::sendSnapshot(std::shared_ptr pa if (status == SnapshotStatus::DONE) { LOG(INFO) << part->idStr_ << "Finished, totalCount " << totalCount << ", totalSize " << totalSize; - p.setValue(Status::OK()); + p.setValue(commitLogIdAndTerm); } return true; } else { @@ -90,6 +90,7 @@ folly::Future 
SnapshotManager::sendSnapshot(std::shared_ptr pa } catch (const std::exception& e) { LOG(ERROR) << part->idStr_ << "Send snapshot failed, exception " << e.what() << ", retry " << retry << " times"; + sleep(1); continue; } } diff --git a/src/kvstore/raftex/SnapshotManager.h b/src/kvstore/raftex/SnapshotManager.h index a7c40ac3527..de613caaa6f 100644 --- a/src/kvstore/raftex/SnapshotManager.h +++ b/src/kvstore/raftex/SnapshotManager.h @@ -38,7 +38,8 @@ class SnapshotManager { virtual ~SnapshotManager() = default; // Send snapshot for spaceId, partId to host dst. - folly::Future sendSnapshot(std::shared_ptr part, const HostAddr& dst); + folly::Future>> sendSnapshot(std::shared_ptr part, + const HostAddr& dst); private: folly::Future send(GraphSpaceID spaceId, diff --git a/src/kvstore/raftex/test/CMakeLists.txt b/src/kvstore/raftex/test/CMakeLists.txt index c40b29720c3..d5368fae5fa 100644 --- a/src/kvstore/raftex/test/CMakeLists.txt +++ b/src/kvstore/raftex/test/CMakeLists.txt @@ -124,21 +124,6 @@ nebula_add_test( gtest ) -nebula_add_test( - NAME - snapshot_test - SOURCES - SnapshotTest.cpp - RaftexTestBase.cpp - TestShard.cpp - OBJECTS - ${RAFTEX_TEST_LIBS} - LIBRARIES - ${THRIFT_LIBRARIES} - wangle - gtest -) - nebula_add_test( NAME member_change_test diff --git a/src/kvstore/raftex/test/SnapshotTest.cpp b/src/kvstore/raftex/test/SnapshotTest.cpp deleted file mode 100644 index 9ff5a6eb656..00000000000 --- a/src/kvstore/raftex/test/SnapshotTest.cpp +++ /dev/null @@ -1,100 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include -#include - -#include "common/base/Base.h" -#include "common/fs/FileUtils.h" -#include "common/fs/TempDir.h" -#include "common/network/NetworkUtils.h" -#include "common/thread/GenericThreadPool.h" -#include "kvstore/raftex/RaftexService.h" -#include "kvstore/raftex/test/RaftexTestBase.h" -#include "kvstore/raftex/test/TestShard.h" - -DECLARE_uint32(raft_heartbeat_interval_secs); -DECLARE_int32(wal_ttl); -DECLARE_int64(wal_file_size); -DECLARE_int32(wal_buffer_size); -DECLARE_int32(wal_buffer_num); -DECLARE_int32(raft_rpc_timeout_ms); - -namespace nebula { -namespace raftex { - -TEST(SnapshotTest, LearnerCatchUpDataTest) { - fs::TempDir walRoot("/tmp/catch_up_data.XXXXXX"); - FLAGS_wal_file_size = 1024; - FLAGS_wal_buffer_size = 512; - FLAGS_raft_rpc_timeout_ms = 2000; - std::shared_ptr workers; - std::vector wals; - std::vector allHosts; - std::vector> services; - std::vector> copies; - - std::shared_ptr leader; - std::vector isLearner = {false, false, false, true}; - setupRaft(4, walRoot, workers, wals, allHosts, services, copies, leader, isLearner); - - // Check all hosts agree on the same leader - checkLeadership(copies, leader); - - std::vector msgs; - for (int i = 0; i < 10; i++) { - appendLogs(i * 100, i * 100 + 99, leader, msgs, true); - } - // Sleep a while to make sure the last log has been committed on followers - sleep(FLAGS_raft_heartbeat_interval_secs); - - // Check every copy - for (int i = 0; i < 3; i++) { - ASSERT_EQ(1000, copies[i]->getNumLogs()); - } - - for (int i = 0; i < 1000; ++i) { - for (int j = 0; j < 3; j++) { - folly::StringPiece msg; - ASSERT_TRUE(copies[j]->getLogMsg(i, msg)); - ASSERT_EQ(msgs[i], msg.toString()); - } - } - // wait for the wal to be cleaned - FLAGS_wal_ttl = 1; - sleep(FLAGS_wal_ttl + 3); - FLAGS_wal_ttl = 60; - LOG(INFO) << "Add learner, we need to catch up data!"; - auto f = leader->sendCommandAsync(test::encodeLearner(allHosts[3])); - f.wait(); - - LOG(INFO) << "Let's continue to write some logs"; - 
for (int i = 10; i < 20; i++) { - appendLogs(i * 100, i * 100 + 99, leader, msgs, true); - } - sleep(FLAGS_raft_heartbeat_interval_secs); - - auto& learner = copies[3]; - ASSERT_EQ(2000, learner->getNumLogs()); - for (int i = 0; i < 2000; ++i) { - folly::StringPiece msg; - ASSERT_TRUE(learner->getLogMsg(i, msg)); - ASSERT_EQ(msgs[i], msg.toString()); - } - - LOG(INFO) << "Finished UT"; - finishRaft(services, copies, workers, leader); -} - -} // namespace raftex -} // namespace nebula - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - folly::init(&argc, &argv, true); - google::SetStderrLogging(google::INFO); - - return RUN_ALL_TESTS(); -} diff --git a/src/kvstore/wal/FileBasedWal.cpp b/src/kvstore/wal/FileBasedWal.cpp index ac6af23f6dc..caecf65de55 100644 --- a/src/kvstore/wal/FileBasedWal.cpp +++ b/src/kvstore/wal/FileBasedWal.cpp @@ -353,7 +353,6 @@ void FileBasedWal::rollbackInFile(WalFileInfoPtr info, LogID logId) { } lastLogId_ = logId; lastLogTerm_ = term; - LOG(INFO) << idStr_ << "Rollback to log " << logId; CHECK_GT(pos, 0) << "This wal should have been deleted"; if (pos < FileUtils::fileSize(path)) { @@ -610,6 +609,8 @@ bool FileBasedWal::rollbackToLog(LogID id) { VLOG(1) << "Roll back to log " << id << ", the last WAL file is now \"" << walFiles_.rbegin()->second->path() << "\""; rollbackInFile(walFiles_.rbegin()->second, id); + CHECK_EQ(lastLogId_, id); + CHECK_EQ(walFiles_.rbegin()->second->lastId(), id); } } @@ -631,7 +632,7 @@ bool FileBasedWal::reset() { std::vector files = FileUtils::listAllFilesInDir(dir_.c_str(), false, "*.wal"); for (auto& fn : files) { auto absFn = FileUtils::joinPath(dir_, fn); - LOG(INFO) << "Removing " << absFn; + VLOG(1) << "Removing " << absFn; unlink(absFn.c_str()); } lastLogId_ = firstLogId_ = 0; @@ -714,5 +715,14 @@ size_t FileBasedWal::accessAllWalInfo(std::function f return count; } +TermID FileBasedWal::getLogTerm(LogID id) { + TermID term = -1; + auto iter = iterator(id, id); + if 
(iter->valid()) { + term = iter->logTerm(); + } + return term; +} + } // namespace wal } // namespace nebula diff --git a/src/kvstore/wal/FileBasedWal.h b/src/kvstore/wal/FileBasedWal.h index f10cb52d989..57d9439a5d3 100644 --- a/src/kvstore/wal/FileBasedWal.h +++ b/src/kvstore/wal/FileBasedWal.h @@ -72,6 +72,9 @@ class FileBasedWal final : public Wal, public std::enable_shared_from_this wal, LogID startI } if (startId < wal_->firstLogId()) { - LOG(ERROR) << wal_->idStr_ << "The given log id " << startId - << " is out of the range, the wal firstLogId is " << wal_->firstLogId(); + VLOG(1) << wal_->idStr_ << "The given log id " << startId + << " is out of the range, the wal firstLogId is " << wal_->firstLogId(); currId_ = lastId_ + 1; return; } diff --git a/src/kvstore/wal/test/FileBasedWalTest.cpp b/src/kvstore/wal/test/FileBasedWalTest.cpp index 55b4004561c..a2a6a8a45f5 100644 --- a/src/kvstore/wal/test/FileBasedWalTest.cpp +++ b/src/kvstore/wal/test/FileBasedWalTest.cpp @@ -568,6 +568,40 @@ TEST(FileBasedWal, CleanWalBeforeIdTest) { CHECK_EQ(1000, wal->lastLogId()); } +TEST(FileBasedWal, getLogTermTest) { + TempDir walDir("/tmp/testWal.XXXXXX"); + FileBasedWalInfo info; + FileBasedWalPolicy policy; + policy.fileSize = 1024L * 1024L; + policy.bufferSize = 1024L * 1024L; + + auto wal = FileBasedWal::getWal( + walDir.path(), info, policy, [](LogID, TermID, ClusterID, const std::string&) { + return true; + }); + + // Append > 10MB logs in total + for (int i = 1; i <= 10000; i++) { + ASSERT_TRUE( + wal->appendLog(i /*id*/, i /*term*/, 0 /*cluster*/, folly::stringPrintf(kLongMsg, i))); + } + + // in the memory buffer + ASSERT_EQ(10000, wal->getLogTerm(10000)); + // in the file + ASSERT_EQ(4, wal->getLogTerm(4)); + + // Close the wal + wal.reset(); + + // Now let's open it to read + wal = FileBasedWal::getWal( + walDir.path(), info, policy, [](LogID, TermID, ClusterID, const std::string&) { + return true; + }); + EXPECT_EQ(10, wal->getLogTerm(10)); +} + } // namespace 
wal } // namespace nebula diff --git a/tests/admin/test_configs.py b/tests/admin/test_configs.py index 9c87734c9f6..a6a7213b3ed 100644 --- a/tests/admin/test_configs.py +++ b/tests/admin/test_configs.py @@ -60,7 +60,7 @@ def test_configs(self): expected_result = [ ['GRAPH', 'v', 'int', 'MUTABLE', v], ['GRAPH', 'minloglevel', 'int', 'MUTABLE', 0], - ['GRAPH', 'slow_op_threshhold_ms', 'int', 'MUTABLE', 50], + ['GRAPH', 'slow_op_threshhold_ms', 'int', 'MUTABLE', 100], ['GRAPH', 'heartbeat_interval_secs', 'int', 'MUTABLE', 1], ['GRAPH', 'meta_client_retry_times', 'int', 'MUTABLE', 3], ['GRAPH', 'accept_partial_success', 'bool', 'MUTABLE', False], @@ -80,7 +80,7 @@ def test_configs(self): ['STORAGE', 'wal_ttl', 'int', 'MUTABLE', 14400], ['STORAGE', 'minloglevel', 'int', 'MUTABLE', 0], ['STORAGE', 'custom_filter_interval_secs', 'int', 'MUTABLE', 86400], - ['STORAGE', 'slow_op_threshhold_ms', 'int', 'MUTABLE', 50], + ['STORAGE', 'slow_op_threshhold_ms', 'int', 'MUTABLE', 100], ['STORAGE', 'heartbeat_interval_secs', 'int', 'MUTABLE', 1], ['STORAGE', 'meta_client_retry_times', 'int', 'MUTABLE', 3], ['STORAGE', 'rocksdb_db_options', 'map', 'MUTABLE', {}], From 8d548dba299c9c15842515618943ff6c4980ab13 Mon Sep 17 00:00:00 2001 From: "jie.wang" <38901892+jievince@users.noreply.github.com> Date: Thu, 18 Nov 2021 11:11:21 +0800 Subject: [PATCH 24/53] Add more geo tests (#3293) * add some geo index tests * add some geo row writer test * remove common clause license * add col19 of geo null * remove debug log Co-authored-by: cpw <13495049+CPWstatic@users.noreply.github.com> Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .clang-tidy | 2 +- src/codec/RowWriterV2.cpp | 2 +- src/codec/test/RowWriterV2Test.cpp | 62 ++++++++- src/common/geo/test/CMakeLists.txt | 23 +++- src/common/geo/test/GeoIndexTest.cpp | 184 +++++++++++++++++++++++++++ 5 files changed, 269 insertions(+), 4 deletions(-) create mode 100644 src/common/geo/test/GeoIndexTest.cpp diff --git 
a/.clang-tidy b/.clang-tidy index 134cb61e561..5342022d5d2 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -67,7 +67,7 @@ Checks: '-*,clang-diagnostic-*,clang-analyzer-*,-misc-unused-parameters, google-upgrade-googletest-case, modernize-avoid-bind, - modernize-concat-nested-namespaces, + -modernize-concat-nested-namespaces, modernize-deprecated-headers, modernize-deprecated-ios-base-aliases, modernize-loop-convert, diff --git a/src/codec/RowWriterV2.cpp b/src/codec/RowWriterV2.cpp index 98954da232b..d5b1d0e74d0 100644 --- a/src/codec/RowWriterV2.cpp +++ b/src/codec/RowWriterV2.cpp @@ -844,7 +844,7 @@ std::string RowWriterV2::processOutOfSpace() noexcept { // Now let's process all strings for (size_t i = 0; i < schema_->getNumFields(); i++) { auto field = schema_->field(i); - if (field->type() != PropertyType::STRING) { + if (field->type() != PropertyType::STRING && field->type() != PropertyType::GEOGRAPHY) { continue; } diff --git a/src/codec/test/RowWriterV2Test.cpp b/src/codec/test/RowWriterV2Test.cpp index 527a46b8492..c1b6be0eafd 100644 --- a/src/codec/test/RowWriterV2Test.cpp +++ b/src/codec/test/RowWriterV2Test.cpp @@ -27,6 +27,22 @@ const DateTime dt = {2020, 2, 20, 10, 30, 45, 0}; const Value sVal("Hello world!"); const Value iVal(64); const Time t = {10, 30, 45, 0}; +// POINT(179.0 89.9) +const Geography geogPoint = Point(Coordinate(179.0, 89.9)); +// LINESTRING(0 1, 1 2, 3 7) +const Geography geogLineString = + LineString(std::vector{Coordinate(0, 1), Coordinate(1, 2), Coordinate(3, 7)}); +// POLYGON((-108.7 35.0, -100.0 46.5, -90.7 34.9, -108.7 35.0), +// (-100.1 41.4, -102.9 37.6, -96.8 37.5, -100.1 41.4)) +const Geography geogPolygon = Polygon( + std::vector>{std::vector{Coordinate(-108.7, 35.0), + Coordinate(-100.0, 46.5), + Coordinate(-90.7, 34.9), + Coordinate(-108.7, 35.0)}, + std::vector{Coordinate(-100.1, 41.4), + Coordinate(-102.9, 37.6), + Coordinate(-96.8, 37.5), + Coordinate(-100.1, 41.4)}}); TEST(RowWriterV2, NoDefaultValue) { SchemaWriter 
schema(12 /*Schema version*/); @@ -45,6 +61,13 @@ TEST(RowWriterV2, NoDefaultValue) { schema.appendCol("Col13", PropertyType::DATETIME); schema.appendCol("Col14", PropertyType::INT64, 0, true); schema.appendCol("Col15", PropertyType::INT32, 0, true); + schema.appendCol( + "Col16", PropertyType::GEOGRAPHY, 0, false, nullptr, meta::cpp2::GeoShape::POINT); + schema.appendCol( + "Col17", PropertyType::GEOGRAPHY, 0, false, nullptr, meta::cpp2::GeoShape::LINESTRING); + schema.appendCol( + "Col18", PropertyType::GEOGRAPHY, 0, false, nullptr, meta::cpp2::GeoShape::POLYGON); + schema.appendCol("Col19", PropertyType::GEOGRAPHY, 0, true, nullptr, meta::cpp2::GeoShape::ANY); ASSERT_EQ(Value::Type::STRING, sVal.type()); ASSERT_EQ(Value::Type::INT, iVal.type()); @@ -65,6 +88,10 @@ TEST(RowWriterV2, NoDefaultValue) { EXPECT_EQ(WriteResult::SUCCEEDED, writer1.set(12, dt)); EXPECT_EQ(WriteResult::SUCCEEDED, writer1.setNull(13)); // Purposely skip the col15 + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.set(15, geogPoint)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.set(16, geogLineString)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer1.set(17, geogPolygon)); + // Purposely skip the col19 ASSERT_EQ(WriteResult::SUCCEEDED, writer1.finish()); RowWriterV2 writer2(&schema); @@ -83,10 +110,16 @@ TEST(RowWriterV2, NoDefaultValue) { EXPECT_EQ(WriteResult::SUCCEEDED, writer2.set("Col13", dt)); EXPECT_EQ(WriteResult::SUCCEEDED, writer2.setNull("Col14")); // Purposely skip the col15 + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.set("Col16", geogPoint)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.set("Col17", geogLineString)); + EXPECT_EQ(WriteResult::SUCCEEDED, writer2.set("Col18", geogPolygon)); + // Purposely skip the col19 ASSERT_EQ(WriteResult::SUCCEEDED, writer2.finish()); std::string encoded1 = std::move(writer1).moveEncodedStr(); std::string encoded2 = writer2.getEncodedStr(); + LOG(INFO) << "encoded1, size=" << encoded1.size() << "content=" << folly::hexlify(encoded1); + LOG(INFO) << 
"encoded2, size=" << encoded2.size() << "content=" << folly::hexlify(encoded2); auto reader1 = RowReaderWrapper::getRowReader(&schema, encoded1); auto reader2 = RowReaderWrapper::getRowReader(&schema, encoded2); @@ -175,7 +208,7 @@ TEST(RowWriterV2, NoDefaultValue) { EXPECT_EQ(t, v1.getTime()); EXPECT_EQ(v1, v2); - // Col1333 + // Col13 v1 = reader1->getValueByName("Col13"); v2 = reader2->getValueByIndex(12); EXPECT_EQ(Value::Type::DATETIME, v1.type()); @@ -193,6 +226,33 @@ TEST(RowWriterV2, NoDefaultValue) { v2 = reader2->getValueByIndex(14); EXPECT_EQ(Value::Type::NULLVALUE, v1.type()); EXPECT_EQ(v1, v2); + + // Col16 + v1 = reader1->getValueByName("Col16"); + v2 = reader2->getValueByIndex(15); + EXPECT_EQ(Value::Type::GEOGRAPHY, v1.type()); + EXPECT_EQ(geogPoint, v1.getGeography()); + EXPECT_EQ(v1, v2); + + // Col17 + v1 = reader1->getValueByName("Col17"); + v2 = reader2->getValueByIndex(16); + EXPECT_EQ(Value::Type::GEOGRAPHY, v1.type()); + EXPECT_EQ(geogLineString, v1.getGeography()); + EXPECT_EQ(v1, v2); + + // Col18 + v1 = reader1->getValueByName("Col18"); + v2 = reader2->getValueByIndex(17); + EXPECT_EQ(Value::Type::GEOGRAPHY, v1.type()); + EXPECT_EQ(geogPolygon, v1.getGeography()); + EXPECT_EQ(v1, v2); + + // Col19 + v1 = reader1->getValueByName("Col19"); + v2 = reader2->getValueByIndex(18); + EXPECT_EQ(Value::Type::NULLVALUE, v1.type()); + EXPECT_EQ(v1, v2); } TEST(RowWriterV2, WithDefaultValue) { diff --git a/src/common/geo/test/CMakeLists.txt b/src/common/geo/test/CMakeLists.txt index a34a1e0f15d..e1455c56154 100644 --- a/src/common/geo/test/CMakeLists.txt +++ b/src/common/geo/test/CMakeLists.txt @@ -14,8 +14,29 @@ nebula_add_test( $ $ $ - $ LIBRARIES gtest gtest_main ) + +nebula_add_test( + NAME + geo_index_test + SOURCES + GeoIndexTest.cpp + OBJECTS + $ + $ + $ + $ + $ + $ + $ + LIBRARIES + ${ROCKSDB_LIBRARIES} + ${THRIFT_LIBRARIES} + ${PROXYGEN_LIBRARIES} + wangle + gtest + gtest_main +) diff --git a/src/common/geo/test/GeoIndexTest.cpp 
b/src/common/geo/test/GeoIndexTest.cpp new file mode 100644 index 00000000000..fcd377b7770 --- /dev/null +++ b/src/common/geo/test/GeoIndexTest.cpp @@ -0,0 +1,184 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include + +#include + +#include "common/base/Base.h" +#include "common/geo/GeoIndex.h" + +namespace nebula { +namespace geo { + +std::vector toUint64Vector(std::vector expect) { + auto reinterpretInt64AsUint64 = [](int64_t i) -> uint64_t { + const char* c = reinterpret_cast(&i); + uint64_t u = *reinterpret_cast(c); + return u; + }; + + std::vector transformedExpect; + transformedExpect.reserve(expect.size()); + for (int64_t i : expect) { + transformedExpect.push_back(reinterpretInt64AsUint64(i)); + } + return transformedExpect; +} + +// The tested wkt data is generated by https://clydedacruz.github.io/openstreetmap-wkt-playground/ +TEST(indexCells, point) { + geo::RegionCoverParams rc; + geo::GeoIndex geoIndex(rc); + { + auto point = Geography::fromWKT("POINT(1.0 1.0)").value(); + auto cells = geoIndex.indexCells(point); + EXPECT_EQ(toUint64Vector({1153277837650709461}), cells); + } + { + auto point = Geography::fromWKT("POINT(179.9 -89.999)").value(); + auto cells = geoIndex.indexCells(point); + EXPECT_EQ(toUint64Vector({-5764607523209916331}), cells); + } + { + auto point = Geography::fromWKT("POINT(0.0 0.0)").value(); + auto cells = geoIndex.indexCells(point); + EXPECT_EQ(toUint64Vector({1152921504606846977}), cells); + } + { + auto point = Geography::fromWKT("POINT(-36.843143 79.9999999)").value(); + auto cells = geoIndex.indexCells(point); + EXPECT_EQ(toUint64Vector({5738492864430648919}), cells); + } +} + +TEST(indexCells, lineString) { + geo::RegionCoverParams rc; + geo::GeoIndex geoIndex(rc); + { + auto line = Geography::fromWKT("LINESTRING(1.0 1.0, 2.0 2.0)").value(); + auto cells = geoIndex.indexCells(line); + std::vector expect{ + 1153290940513779712, + 
1154047404446580736, + 1154064996699734016, + 1154135365443911680, + 1154164685843464192, + 1154328879221964800, + 1154346471676444672, + }; + EXPECT_EQ(toUint64Vector(expect), cells); + } + { + auto line = Geography::fromWKT( + "LINESTRING(-100.03601074218751 40.74400563300867,-96.96516036987305 " + "39.60634945766583,-91.84398651123048 39.706526341505366)") + .value(); + auto cells = geoIndex.indexCells(line); + std::vector expect{-8676255050873438208, + -8675903207152549888, + -8674214357292285952, + -8665770107990966272, + -8665207158037544960, + -8664644208084123648, + -8664081258130702336, + -8656692539992047616}; + EXPECT_EQ(toUint64Vector(expect), cells); + } + { + auto line = Geography::fromWKT( + "LINESTRING(-109.18024063110352 40.96952973563833,-102.11740493774414 " + "40.98832114106014,-102.00119018554688 37.07120386611709,-108.97098541259767 " + "37.00392356248513,-109.09063339233398 40.94178643285866)") + .value(); + auto cells = geoIndex.indexCells(line); + std::vector expect{-8715591178868752384, + -8714183803985199104, + -8712494954124935168, + -8702080379986640896, + -8699828580172955648, + -8693407432266743808, + -8688569581104529408, + -8686317781290844160}; + EXPECT_EQ(toUint64Vector(expect), cells); + } +} +TEST(indexCells, polygon) { + geo::RegionCoverParams rc; + geo::GeoIndex geoIndex(rc); + { + auto polygon = Geography::fromWKT( + "POLYGON((-105.59286117553711 43.12955341892069,-98.76176834106447 " + "44.11877181138391,-93.97396087646486 " + "38.023348535033705,-105.59286117553711 43.12955341892069))") + .value(); + auto cells = geoIndex.indexCells(polygon); + std::vector expect{-8690821380918214656, + -8686317781290844160, + -8684065981477158912, + -8678436481942945792, + -8665770107990966272, + -8665207158037544960, + -8664644208084123648, + -8662955358223859712}; + EXPECT_EQ(toUint64Vector(expect), cells); + } + { + auto polygon = + Geography::fromWKT( + "POLYGON((-107.24699020385744 45.21638951846552,-91.75283432006836 " + 
"46.158312926461235,-90.07295608520508 35.17914020576748,-109.77504730224612 " + "38.65334327823746,-107.24699020385744 45.21638951846552))") + .value(); + auto cells = geoIndex.indexCells(polygon); + std::vector expect{5958262307011166208, + 5967269506265907200, + 5994291104030130176, + 6002172403378028544, + -8714465278961909760, + -8702080379986640896, + -8696450880452427776, + -8687443681197686784, + -8678436481942945792, + -8669429282688204800, + -8660422083433463808, + -8651414884178722816}; + EXPECT_EQ(toUint64Vector(expect), cells); + } + { + auto polygon = + Geography::fromWKT( + "POLYGON((-107.17094421386722 51.23698687887105,-100.24475097656253 " + "50.57407993312597,-101.63520812988283 47.57050358015326,-108.1597137451172 " + "47.614032638527846,-107.17094421386722 51.23698687887105),(-106.00682258605956 " + "50.35416859141216,-105.23014068603514 50.212503875989455,-105.55715560913085 " + "49.755319847594194,-106.36962890624999 49.95817799043337,-106.00682258605956 " + "50.35416859141216),(-103.90560150146483 49.21126151433475,-102.1109676361084 " + "49.32232483567492,-102.99759864807127 48.52160729809822,-103.90560150146483 " + "49.21126151433475))") + .value(); + auto cells = geoIndex.indexCells(polygon); + std::vector expect{5969732412312125440, + 5971192563753811968, + 5971491630916567040, + 5972899005800120320, + 5986409804682231808, + 5988661604495917056, + 5990913404309602304, + 5997668803750658048}; + EXPECT_EQ(toUint64Vector(expect), cells); + } +} + +} // namespace geo +} // namespace nebula + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + folly::init(&argc, &argv, true); + google::SetStderrLogging(google::INFO); + + return RUN_ALL_TESTS(); +} From 26299e529271e4ef453de787c9b07328429bf72f Mon Sep 17 00:00:00 2001 From: panda-sheep <59197347+panda-sheep@users.noreply.github.com> Date: Thu, 18 Nov 2021 12:49:03 +0800 Subject: [PATCH 25/53] DSS part10 unify LogStrListIterator for raft and drainer (#3326) * unify 
LogStrListIterator for raft and drainer * fix format --- src/common/utils/CMakeLists.txt | 5 +++++ .../raftex => common/utils}/LogStrListIterator.cpp | 4 +--- .../raftex => common/utils}/LogStrListIterator.h | 10 ++++------ src/daemons/CMakeLists.txt | 1 + src/interface/common.thrift | 6 ++++++ src/interface/raftex.thrift | 10 ++-------- src/kvstore/raftex/CMakeLists.txt | 1 - src/kvstore/raftex/Host.cpp | 4 ++-- src/kvstore/raftex/RaftPart.cpp | 4 ++-- src/kvstore/raftex/test/CMakeLists.txt | 2 ++ src/kvstore/test/CMakeLists.txt | 1 + src/meta/CMakeLists.txt | 5 +++-- src/storage/test/CMakeLists.txt | 1 + src/tools/db-dump/CMakeLists.txt | 1 + src/tools/db-upgrade/CMakeLists.txt | 1 + src/tools/meta-dump/CMakeLists.txt | 1 + src/tools/simple-kv-verify/CMakeLists.txt | 1 + src/tools/storage-perf/CMakeLists.txt | 1 + 18 files changed, 35 insertions(+), 24 deletions(-) rename src/{kvstore/raftex => common/utils}/LogStrListIterator.cpp (92%) rename src/{kvstore/raftex => common/utils}/LogStrListIterator.h (78%) diff --git a/src/common/utils/CMakeLists.txt b/src/common/utils/CMakeLists.txt index f240cb25f5b..1c909fbc0b2 100644 --- a/src/common/utils/CMakeLists.txt +++ b/src/common/utils/CMakeLists.txt @@ -12,5 +12,10 @@ nebula_add_library( MetaKeyUtils.cpp ) +nebula_add_library( + log_str_list_iterator_obj OBJECT + LogStrListIterator.cpp +) + nebula_add_subdirectory(test) diff --git a/src/kvstore/raftex/LogStrListIterator.cpp b/src/common/utils/LogStrListIterator.cpp similarity index 92% rename from src/kvstore/raftex/LogStrListIterator.cpp rename to src/common/utils/LogStrListIterator.cpp index dfcd452ca17..d33a0ffd0eb 100644 --- a/src/kvstore/raftex/LogStrListIterator.cpp +++ b/src/common/utils/LogStrListIterator.cpp @@ -3,13 +3,12 @@ * This source code is licensed under Apache 2.0 License. 
*/ -#include "kvstore/raftex/LogStrListIterator.h" +#include "common/utils/LogStrListIterator.h" #include "common/base/Base.h" #include "common/thrift/ThriftTypes.h" namespace nebula { -namespace raftex { LogStrListIterator::LogStrListIterator(LogID firstLogId, TermID term, @@ -45,5 +44,4 @@ folly::StringPiece LogStrListIterator::logMsg() const { return logEntries_.at(idx_).get_log_str(); } -} // namespace raftex } // namespace nebula diff --git a/src/kvstore/raftex/LogStrListIterator.h b/src/common/utils/LogStrListIterator.h similarity index 78% rename from src/kvstore/raftex/LogStrListIterator.h rename to src/common/utils/LogStrListIterator.h index f1be123538d..2e724711f6e 100644 --- a/src/kvstore/raftex/LogStrListIterator.h +++ b/src/common/utils/LogStrListIterator.h @@ -3,15 +3,14 @@ * This source code is licensed under Apache 2.0 License. */ -#ifndef RAFTEX_LOGSTRLISTITERATOR_H_ -#define RAFTEX_LOGSTRLISTITERATOR_H_ +#ifndef COMMON_UTILS_LOGSTRLISTITERATOR_H_ +#define COMMON_UTILS_LOGSTRLISTITERATOR_H_ #include "common/base/Base.h" #include "common/utils/LogIterator.h" -#include "interface/gen-cpp2/raftex_types.h" +#include "interface/gen-cpp2/common_types.h" namespace nebula { -namespace raftex { class LogStrListIterator final : public LogIterator { public: @@ -33,7 +32,6 @@ class LogStrListIterator final : public LogIterator { std::vector logEntries_; }; -} // namespace raftex } // namespace nebula -#endif // RAFTEX_LOGSTRLISTITERATOR_H_ +#endif // COMMON_UTILS_LOGSTRLISTITERATOR_H_ diff --git a/src/daemons/CMakeLists.txt b/src/daemons/CMakeLists.txt index 0fcbcbcd8d0..1a92710dc34 100644 --- a/src/daemons/CMakeLists.txt +++ b/src/daemons/CMakeLists.txt @@ -42,6 +42,7 @@ set(storage_meta_deps $ $ $ + $ $ $ $ diff --git a/src/interface/common.thrift b/src/interface/common.thrift index 463fceb2c24..3636deec436 100644 --- a/src/interface/common.thrift +++ b/src/interface/common.thrift @@ -35,6 +35,7 @@ cpp_include "common/datatypes/GeographyOps-inl.h" const 
binary (cpp.type = "char const *") version = "2.6.0" +typedef i64 (cpp.type = "nebula::ClusterID") ClusterID typedef i32 (cpp.type = "nebula::GraphSpaceID") GraphSpaceID typedef i32 (cpp.type = "nebula::PartitionID") PartitionID typedef i32 (cpp.type = "nebula::TagID") TagID @@ -251,6 +252,11 @@ struct CheckpointInfo { 2: binary path, } +// used for raft and drainer +struct LogEntry { + 1: ClusterID cluster; + 2: binary log_str; +} // These are all data types supported in the graph properties enum PropertyType { diff --git a/src/interface/raftex.thrift b/src/interface/raftex.thrift index d59bd568ff6..8f1a8ea4f47 100644 --- a/src/interface/raftex.thrift +++ b/src/interface/raftex.thrift @@ -5,7 +5,7 @@ namespace cpp nebula.raftex -cpp_include "common/thrift/ThriftTypes.h" +include "common.thrift" enum ErrorCode { SUCCEEDED = 0; @@ -59,12 +59,6 @@ struct AskForVoteResponse { } -struct LogEntry { - 1: ClusterID cluster; - 2: binary log_str; -} - - /* AppendLogRequest serves two purposes: @@ -100,7 +94,7 @@ struct AppendLogRequest { // which specified by log_term // 10: TermID log_term; - 11: list log_str_list; + 11: list log_str_list; } diff --git a/src/kvstore/raftex/CMakeLists.txt b/src/kvstore/raftex/CMakeLists.txt index dcb96522dfe..4056e4a47b8 100644 --- a/src/kvstore/raftex/CMakeLists.txt +++ b/src/kvstore/raftex/CMakeLists.txt @@ -1,6 +1,5 @@ nebula_add_library( raftex_obj OBJECT - LogStrListIterator.cpp RaftPart.cpp RaftexService.cpp Host.cpp diff --git a/src/kvstore/raftex/Host.cpp b/src/kvstore/raftex/Host.cpp index ab79d2b9195..e57dd9cb6bb 100644 --- a/src/kvstore/raftex/Host.cpp +++ b/src/kvstore/raftex/Host.cpp @@ -289,11 +289,11 @@ ErrorOr> Host::prepareA req->set_last_log_id_sent(lastLogIdSent_); req->set_log_term(term); - std::vector logs; + std::vector logs; for (size_t cnt = 0; it->valid() && it->logTerm() == term && cnt < FLAGS_max_appendlog_batch_size; ++(*it), ++cnt) { - cpp2::LogEntry le; + nebula::cpp2::LogEntry le; 
le.set_cluster(it->logSource()); le.set_log_str(it->logMsg().toString()); logs.emplace_back(std::move(le)); diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index 99dead9f85b..dbceffd4bb9 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -17,10 +17,10 @@ #include "common/thread/NamedThread.h" #include "common/thrift/ThriftClientManager.h" #include "common/time/WallClock.h" +#include "common/utils/LogStrListIterator.h" #include "interface/gen-cpp2/RaftexServiceAsyncClient.h" #include "kvstore/LogEncoder.h" #include "kvstore/raftex/Host.h" -#include "kvstore/raftex/LogStrListIterator.h" #include "kvstore/wal/FileBasedWal.h" DEFINE_uint32(raft_heartbeat_interval_secs, 5, "Seconds between each heartbeat"); @@ -1505,7 +1505,7 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, } // Append new logs - std::vector logEntries = std::vector( + std::vector logEntries = std::vector( std::make_move_iterator(req.get_log_str_list().begin() + diffIndex), std::make_move_iterator(req.get_log_str_list().end())); LogStrListIterator iter(firstId, req.get_log_term(), std::move(logEntries)); diff --git a/src/kvstore/raftex/test/CMakeLists.txt b/src/kvstore/raftex/test/CMakeLists.txt index d5368fae5fa..40982da8a59 100644 --- a/src/kvstore/raftex/test/CMakeLists.txt +++ b/src/kvstore/raftex/test/CMakeLists.txt @@ -2,8 +2,10 @@ set(RAFTEX_TEST_LIBS $ $ $ + $ $ $ + $ $ $ $ diff --git a/src/kvstore/test/CMakeLists.txt b/src/kvstore/test/CMakeLists.txt index 7c872b29eda..a9a844f1786 100644 --- a/src/kvstore/test/CMakeLists.txt +++ b/src/kvstore/test/CMakeLists.txt @@ -5,6 +5,7 @@ set(KVSTORE_TEST_LIBS $ $ $ + $ $ $ $ diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index e3165b35fdf..f5ecbe7bcd8 100644 --- a/src/meta/CMakeLists.txt +++ b/src/meta/CMakeLists.txt @@ -47,8 +47,8 @@ nebula_add_library( processors/admin/CreateSnapshotProcessor.cpp processors/admin/DropSnapshotProcessor.cpp 
processors/admin/ListSnapshotsProcessor.cpp - processors/job/BalancePlan.cpp - processors/job/BalanceTask.cpp + processors/job/BalancePlan.cpp + processors/job/BalanceTask.cpp processors/admin/AdminClient.cpp processors/admin/SnapShot.cpp processors/admin/CreateBackupProcessor.cpp @@ -129,6 +129,7 @@ set(meta_test_deps $ $ $ + $ $ $ $ diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index c664696de53..f697c3e6be2 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -18,6 +18,7 @@ set(storage_test_deps $ $ $ + $ $ $ $ diff --git a/src/tools/db-dump/CMakeLists.txt b/src/tools/db-dump/CMakeLists.txt index 62f272621a0..8618259bf4b 100644 --- a/src/tools/db-dump/CMakeLists.txt +++ b/src/tools/db-dump/CMakeLists.txt @@ -16,6 +16,7 @@ set(tools_test_deps $ $ $ + $ $ $ $ diff --git a/src/tools/db-upgrade/CMakeLists.txt b/src/tools/db-upgrade/CMakeLists.txt index 584d03acf2d..d43c203ab71 100644 --- a/src/tools/db-upgrade/CMakeLists.txt +++ b/src/tools/db-upgrade/CMakeLists.txt @@ -24,6 +24,7 @@ nebula_add_executable( $ $ $ + $ $ $ $ diff --git a/src/tools/meta-dump/CMakeLists.txt b/src/tools/meta-dump/CMakeLists.txt index c5fa9d565d8..9fe5765a612 100644 --- a/src/tools/meta-dump/CMakeLists.txt +++ b/src/tools/meta-dump/CMakeLists.txt @@ -21,6 +21,7 @@ nebula_add_executable( $ $ $ + $ $ $ $ diff --git a/src/tools/simple-kv-verify/CMakeLists.txt b/src/tools/simple-kv-verify/CMakeLists.txt index 32e8eafa5ee..cf65e874429 100644 --- a/src/tools/simple-kv-verify/CMakeLists.txt +++ b/src/tools/simple-kv-verify/CMakeLists.txt @@ -18,6 +18,7 @@ nebula_add_executable( $ $ $ + $ $ $ $ diff --git a/src/tools/storage-perf/CMakeLists.txt b/src/tools/storage-perf/CMakeLists.txt index ea4d1dfab25..deb1bd67277 100644 --- a/src/tools/storage-perf/CMakeLists.txt +++ b/src/tools/storage-perf/CMakeLists.txt @@ -14,6 +14,7 @@ set(perf_test_deps $ $ $ + $ $ $ $ From a7feb32689e22fd830b4a9249addb32291b6291a Mon Sep 17 00:00:00 2001 
From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Thu, 18 Nov 2021 01:14:53 -0500 Subject: [PATCH 26/53] Spelling (#3297) * spelling: applies * spelling: arithmetic * spelling: assignment * spelling: assignments * spelling: assist * spelling: asynchronously * spelling: attached * spelling: attribute * spelling: automatically * spelling: availability * spelling: backed up * spelling: background * spelling: balance * spelling: batches * spelling: because * spelling: begin * spelling: beginning * spelling: being * spelling: between * spelling: bodies * spelling: cache * spelling: calculation * spelling: cancel * spelling: change * spelling: changed * spelling: check * spelling: chosen * spelling: cleanup * spelling: closest * spelling: cluster * spelling: column * spelling: comparison * spelling: completed * spelling: comprehension * spelling: concurrent * spelling: consensus * spelling: console * spelling: consume * spelling: contexts * spelling: convenience * spelling: convenient * spelling: conversion * spelling: correlativities * spelling: corresponding * spelling: corrupted * spelling: current * spelling: custom * spelling: cypher * spelling: daemon * spelling: datetime * spelling: dcheck'd * spelling: default * spelling: definition * spelling: delete * spelling: deletion * spelling: delimiter * spelling: dependencies * spelling: dependency * spelling: descend * spelling: different * spelling: distinct * spelling: divided * spelling: divisors * spelling: doesn't * spelling: double * spelling: dropped * spelling: dummies * spelling: dummy * spelling: duplicate * spelling: embedded * spelling: embedding * spelling: encode * spelling: enough * spelling: equal * spelling: eventually * spelling: executor * spelling: expected * spelling: expression * spelling: extractpropexprvisitor * spelling: failed * spelling: followers * spelling: folly * spelling: forbidden * spelling: found * spelling: function * spelling: further * spelling: futures * spelling: 
geometry * spelling: global * spelling: handling * spelling: happen * spelling: happened * spelling: have * spelling: hierarchy * spelling: highest * spelling: ignored * spelling: illegal * spelling: incompatible * spelling: inconsistency * spelling: indexes * spelling: indicates * spelling: influence * spelling: information * spelling: interpretation * spelling: interval * spelling: invalid * spelling: judged * spelling: labels * spelling: latest * spelling: length * spelling: limit * spelling: low-level * spelling: manager * spelling: mathematical * spelling: message * spelling: messages * spelling: method * spelling: minimum * spelling: minutes * spelling: mismatch * spelling: moment * spelling: multiple * spelling: multiplied * spelling: need * spelling: negative * spelling: neighbors * spelling: nonexistence * spelling: nonexistent * spelling: object * spelling: occupied * spelling: offset * spelling: operations * spelling: optional * spelling: order * spelling: original * spelling: originated * spelling: other * spelling: output * spelling: param * spelling: parser * spelling: partition * spelling: partitions * spelling: percentage * spelling: permission * spelling: persist * spelling: person * spelling: prefix * spelling: preventing * spelling: previous * spelling: prone * spelling: properties * spelling: property * spelling: purging * spelling: qualified * spelling: readded * spelling: received * spelling: refresh * spelling: register * spelling: related * spelling: requests * spelling: required * spelling: reset * spelling: resources * spelling: result * spelling: retrieve * spelling: returned * spelling: returns * spelling: rewritten * spelling: running * spelling: satisfied * spelling: satisfy * spelling: scanned * spelling: scheduler * spelling: schema * spelling: schemas * spelling: secure * spelling: separate * spelling: service * spelling: session * spelling: seven * spelling: several * spelling: severe * spelling: shaquille * spelling: shorter * 
spelling: should * spelling: since all * spelling: snapshot * spelling: source * spelling: specific * spelling: specified * spelling: stats * spelling: status * spelling: stickiness * spelling: storage * spelling: structural * spelling: structure * spelling: succeeded * spelling: successfully * spelling: suffix * spelling: support * spelling: supported * spelling: suppress * spelling: suppressions * spelling: synchronous * spelling: tag * spelling: tags_ * spelling: the * spelling: third * spelling: three * spelling: threshold * spelling: topology * spelling: transfer * spelling: transformed * spelling: true * spelling: unchanged * spelling: unify * spelling: unimplemented * spelling: unknown * spelling: until * spelling: usually * spelling: volcano * spelling: variable * spelling: variables * spelling: version * spelling: versions * spelling: vertex * spelling: vertices * spelling: waiting * spelling: will * clang-format-10 fixes for spelling: folly * VertexIDs in RocksEngineTest must not exceed kDefaultVIdLen This changes the spelling of the nonexistent VertexID back to 8 chars long * fix ci for pr3297 * fix conflict Co-authored-by: Josh Soref Co-authored-by: Shylock Hg <33566796+Shylock-Hg@users.noreply.github.com> Co-authored-by: kyle.cao Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .linters/cpp/checkKeyword.py | 4 +- .linters/cpp/cpplint.py | 10 +- cmake/ThriftGenerate.cmake | 2 +- conf/nebula-graphd.conf.default | 2 +- package/package.sh | 4 +- resources/gflags.json | 2 +- scripts/meta-transfer-tools.sh | 2 +- scripts/nebula.service | 2 +- src/clients/meta/FileBasedClusterIdMan.cpp | 2 +- src/clients/meta/MetaClient.cpp | 10 +- src/clients/meta/MetaClient.h | 12 +- src/clients/storage/GraphStorageClient.cpp | 2 +- src/clients/storage/StorageClientBase.h | 2 +- src/codec/RowReaderV1.cpp | 2 +- src/codec/RowReaderV1.h | 6 +- src/codec/RowWriterV2.cpp | 2 +- src/codec/RowWriterV2.h | 4 +- src/codec/test/RowReaderBenchmark.cpp | 4 +- 
src/codec/test/RowWriterBenchmark.cpp | 2 +- src/common/base/ConcurrentLRUCache.h | 2 +- src/common/base/SignalHandler.h | 4 +- src/common/base/SlowOpTracker.cpp | 2 +- src/common/base/SlowOpTracker.h | 6 +- src/common/base/Status.h | 2 +- src/common/base/StatusOr.h | 8 +- src/common/base/StringUnorderedMap.h | 2 +- src/common/conf/Configuration.cpp | 12 +- src/common/conf/test/ConfigurationTest.cpp | 2 +- src/common/datatypes/Date.h | 2 +- src/common/datatypes/test/CMakeLists.txt | 2 +- src/common/datatypes/test/ValueTest.cpp | 8 +- src/common/datatypes/test/ValueToJsonTest.cpp | 20 +- src/common/expression/Expression.cpp | 2 +- .../expression/LabelAttributeExpression.h | 4 +- src/common/expression/PropertyExpression.h | 2 +- src/common/expression/VariableExpression.h | 4 +- .../test/ConstantExpressionTest.cpp | 4 +- .../expression/test/ExpressionBenchmark.cpp | 2 +- .../expression/test/ExpressionContextMock.h | 2 +- src/common/fs/FileUtils.h | 2 +- src/common/fs/test/FileUtilsTest.cpp | 2 +- src/common/fs/test/TempFileTest.cpp | 2 +- src/common/function/FunctionManager.cpp | 6 +- .../function/test/FunctionManagerTest.cpp | 8 +- src/common/geo/GeoFunction.cpp | 4 +- src/common/geo/io/wkb/WKBWriter.cpp | 2 +- src/common/geo/io/wkt/WKTWriter.cpp | 2 +- src/common/graph/Response.h | 2 +- .../graph/tests/ResponseEncodeDecodeTest.cpp | 4 +- src/common/meta/GflagsManager.cpp | 2 +- src/common/network/NetworkUtils.cpp | 4 +- src/common/network/NetworkUtils.h | 2 +- src/common/network/test/NetworkUtilsTest.cpp | 2 +- .../fulltext/test/FulltextPluginTest.cpp | 16 +- src/common/process/ProcessUtils.cpp | 2 +- src/common/stats/StatsManager.cpp | 4 +- src/common/stats/StatsManager.h | 2 +- src/common/thread/GenericThreadPool.h | 2 +- src/common/thread/GenericWorker.cpp | 6 +- src/common/thread/GenericWorker.h | 6 +- src/common/time/TimeConversion.h | 2 +- src/common/time/TimeUtils.cpp | 6 +- src/common/time/TimeUtils.h | 2 +- src/common/time/test/TimeUtilsTest.cpp | 2 +- 
src/common/utils/MemoryLockWrapper.h | 2 +- src/common/utils/MetaKeyUtils.cpp | 8 +- src/common/utils/MetaKeyUtils.h | 2 +- src/common/utils/NebulaKeyUtils.h | 4 +- src/daemons/MetaDaemon.cpp | 2 +- src/graph/context/ExecutionContext.cpp | 2 +- src/graph/context/ExecutionContext.h | 2 +- src/graph/context/ValidateContext.h | 6 +- .../executor/admin/BalanceLeadersExecutor.h | 29 + .../executor/admin/KillQueryExecutor.cpp | 2 +- .../executor/admin/ShowHostsExecutor.cpp | 10 +- .../executor/admin/ShowStatsExecutor.cpp | 2 +- src/graph/executor/admin/SnapshotExecutor.cpp | 2 +- .../executor/algo/ProduceAllPathsExecutor.cpp | 2 +- .../algo/ProduceSemiShortestPathExecutor.cpp | 2 +- .../query/UnionAllVersionVarExecutor.cpp | 2 +- .../executor/test/CartesianProductTest.cpp | 2 +- src/graph/executor/test/DedupTest.cpp | 14 +- src/graph/executor/test/FilterTest.cpp | 16 +- src/graph/executor/test/LimitTest.cpp | 10 +- .../executor/test/ProduceAllPathsTest.cpp | 6 +- .../test/ProduceSemiShortestPathTest.cpp | 6 +- src/graph/executor/test/SampleTest.cpp | 16 +- src/graph/executor/test/SetExecutorTest.cpp | 2 +- src/graph/executor/test/SortTest.cpp | 14 +- src/graph/executor/test/TopNTest.cpp | 14 +- .../rule/GeoPredicateIndexScanBaseRule.cpp | 2 +- src/graph/optimizer/rule/IndexScanRule.cpp | 2 +- src/graph/planner/PlannersRegister.cpp | 14 +- src/graph/planner/PlannersRegister.h | 8 +- src/graph/planner/match/LabelIndexSeek.cpp | 14 +- src/graph/planner/match/MatchSolver.cpp | 2 +- src/graph/planner/match/PropIndexSeek.cpp | 2 +- src/graph/planner/plan/Admin.h | 2 +- src/graph/planner/plan/PlanNode.cpp | 2 +- src/graph/planner/plan/PlanNode.h | 2 +- src/graph/planner/plan/Query.h | 2 +- .../scheduler/AsyncMsgNotifyBasedScheduler.h | 2 +- src/graph/service/GraphFlags.cpp | 2 +- src/graph/service/QueryEngine.cpp | 2 +- src/graph/service/RequestContext.h | 2 +- src/graph/util/ExpressionUtils.cpp | 6 +- src/graph/validator/AdminJobValidator.h | 2 +- 
src/graph/validator/FetchVerticesValidator.h | 2 +- src/graph/validator/LookupValidator.cpp | 2 +- src/graph/validator/MaintainValidator.cpp | 2 +- src/graph/validator/MatchValidator.cpp | 4 +- src/graph/validator/Validator.h | 2 +- src/graph/validator/test/FetchEdgesTest.cpp | 4 +- .../validator/test/FetchVerticesTest.cpp | 4 +- src/graph/validator/test/MockSchemaManager.h | 2 +- .../validator/test/MutateValidatorTest.cpp | 6 +- .../validator/test/QueryValidatorTest.cpp | 2 +- .../validator/test/ValidatorTestBase.cpp | 2 +- src/graph/validator/test/ValidatorTestBase.h | 2 +- .../validator/test/YieldValidatorTest.cpp | 2 +- src/graph/visitor/ExtractPropExprVisitor.h | 6 +- src/graph/visitor/FoldConstantExprVisitor.cpp | 2 +- src/graph/visitor/FoldConstantExprVisitor.h | 2 +- .../test/RewriteRelExprVisitorTest.cpp | 2 +- .../test/RewriteUnaryNotExprVisitorTest.cpp | 2 +- src/graph/visitor/test/VisitorTestBase.h | 4 +- src/interface/common.thrift | 4 +- src/interface/storage.thrift | 28 +- src/kvstore/DiskManager.h | 4 +- src/kvstore/Listener.cpp | 2 +- src/kvstore/NebulaStore.cpp | 4 +- src/kvstore/Part.cpp | 4 +- src/kvstore/Part.h | 10 +- src/kvstore/RateLimiter.h | 10 +- src/kvstore/RocksEngine.cpp | 4 +- src/kvstore/RocksEngine.h | 2 +- .../plugins/elasticsearch/ESListener.cpp | 4 +- src/kvstore/plugins/hbase/HBaseStore.h | 4 +- src/kvstore/plugins/hbase/hbase.thrift | 6 +- .../plugins/hbase/test/HBaseStoreTest.cpp | 8 +- src/kvstore/raftex/RaftPart.cpp | 16 +- src/kvstore/raftex/RaftPart.h | 4 +- .../raftex/test/LeaderElectionTest.cpp | 2 +- src/kvstore/raftex/test/LogAppendTest.cpp | 2 +- src/kvstore/raftex/test/RaftCase.cpp | 4 +- src/kvstore/raftex/test/RaftexTestBase.cpp | 8 +- src/kvstore/test/LogEncoderTest.cpp | 18 +- src/kvstore/test/NebulaListenerTest.cpp | 61 +- src/kvstore/test/NebulaStoreTest.cpp | 2 +- src/kvstore/test/RateLimiterTest.cpp | 6 +- src/kvstore/test/RocksEngineTest.cpp | 24 +- src/kvstore/wal/AtomicLogBuffer.h | 2 +- 
src/kvstore/wal/FileBasedWal.cpp | 6 +- src/kvstore/wal/test/InMemoryLogBufferList.h | 2 +- src/kvstore/wal/test/LogBufferBenchmark.cpp | 4 +- src/meta/KVBasedClusterIdMan.h | 2 +- src/meta/MetaServiceUtils.cpp | 6 +- src/meta/processors/BaseProcessor-inl.h | 4 +- src/meta/processors/BaseProcessor.h | 2 +- src/meta/processors/admin/AdminClient.cpp | 6 +- src/meta/processors/admin/Balancer.cpp | 1232 +++++++++++++++++ src/meta/processors/admin/Balancer.h | 269 ++++ .../admin/CreateBackupProcessor.cpp | 2 +- .../admin/CreateSnapshotProcessor.cpp | 2 +- src/meta/processors/admin/HBProcessor.cpp | 6 +- .../admin/VerifyClientVersionProcessor.cpp | 2 +- src/meta/processors/job/GetStatsProcessor.cpp | 4 +- src/meta/processors/job/JobManager.cpp | 8 +- src/meta/processors/job/JobManager.h | 4 +- src/meta/processors/job/MetaJobExecutor.cpp | 8 +- src/meta/processors/job/StatsJobExecutor.cpp | 10 +- src/meta/processors/job/StatsJobExecutor.h | 2 +- src/meta/processors/kv/RemoveProcessor.h | 2 +- .../processors/parts/DropSpaceProcessor.cpp | 8 +- .../processors/parts/ListHostsProcessor.cpp | 4 +- .../processors/schema/GetEdgeProcessor.cpp | 2 +- .../processors/schema/GetTagProcessor.cpp | 2 +- .../processors/zone/DropGroupProcessor.cpp | 2 +- .../processors/zone/DropZoneProcessor.cpp | 2 +- src/meta/test/BalanceIntegrationTest.cpp | 4 +- src/meta/test/BalancerTest.cpp | 4 +- src/meta/test/ConfigManTest.cpp | 2 +- src/meta/test/GetStatsTest.cpp | 78 +- src/meta/test/JobManagerTest.cpp | 10 +- src/meta/test/MetaClientTest.cpp | 2 +- src/meta/test/ProcessorTest.cpp | 8 +- src/parser/AdminSentences.h | 10 +- src/parser/parser.yy | 18 +- src/parser/test/ScannerTest.cpp | 6 +- src/storage/BaseProcessor-inl.h | 6 +- src/storage/StorageFlags.cpp | 2 +- src/storage/admin/AdminProcessor.h | 2 +- src/storage/admin/AdminTask.h | 6 +- src/storage/admin/AdminTaskManager.cpp | 2 +- src/storage/admin/RebuildIndexTask.cpp | 12 +- src/storage/admin/StatsTask.cpp | 24 +- 
src/storage/exec/AggregateNode.h | 2 +- src/storage/exec/FilterNode.h | 2 +- src/storage/exec/HashJoinNode.h | 2 +- src/storage/exec/IndexScanNode.h | 20 +- src/storage/exec/IndexSelectionNode.h | 2 +- src/storage/exec/IndexVertexScanNode.cpp | 4 +- src/storage/exec/IndexVertexScanNode.h | 2 +- src/storage/exec/StoragePlan.h | 4 +- src/storage/exec/UpdateNode.h | 8 +- src/storage/query/GetPropProcessor.cpp | 4 +- src/storage/query/QueryBaseProcessor.h | 4 +- .../AddAndUpdateVertexAndEdgeBenchmark.cpp | 16 +- src/storage/test/AdminTaskManagerTest.cpp | 6 +- src/storage/test/CMakeLists.txt | 2 +- src/storage/test/CompactionTest.cpp | 6 +- src/storage/test/GetNeighborsBenchmark.cpp | 8 +- src/storage/test/IndexWithTTLTest.cpp | 8 +- src/storage/test/KVClientTest.cpp | 4 +- src/storage/test/LookupIndexTest.cpp | 2 +- src/storage/test/QueryTestUtils.h | 4 +- src/storage/test/RebuildIndexTest.cpp | 16 +- src/storage/test/ScanEdgePropBenchmark.cpp | 4 +- src/storage/test/StatsTaskTest.cpp | 6 +- .../test/StorageHttpAdminHandlerTest.cpp | 4 +- .../test/StorageHttpStatsHandlerTest.cpp | 2 +- .../test/StorageIndexWriteBenchmark.cpp | 8 +- src/storage/test/UpdateEdgeTest.cpp | 8 +- src/storage/test/UpdateVertexTest.cpp | 10 +- .../ChainAddEdgesProcessorLocal.cpp | 6 +- .../transaction/ChainAddEdgesProcessorLocal.h | 8 +- src/storage/transaction/ConsistUtil.cpp | 2 +- .../transaction/ResumeUpdateProcessor.h | 2 +- .../ResumeUpdateRemoteProcessor.cpp | 2 +- .../transaction/ResumeUpdateRemoteProcessor.h | 2 +- src/tools/db-dump/DbDumpTool.cpp | 14 +- src/tools/db-dump/DbDumper.cpp | 12 +- src/tools/db-upgrade/DbUpgrader.cpp | 6 +- src/tools/db-upgrade/DbUpgrader.h | 4 +- src/tools/db-upgrade/DbUpgraderTool.cpp | 4 +- src/tools/db-upgrade/NebulaKeyUtilsV1.h | 2 +- src/tools/db-upgrade/NebulaKeyUtilsV2.h | 8 +- .../storage-perf/StorageIntegrityTool.cpp | 4 +- src/tools/storage-perf/StoragePerfTool.cpp | 10 +- tests/admin/test_configs.py | 6 +- tests/admin/test_listener.py | 2 +- 
tests/bench/data_generate.py | 2 +- tests/bench/delete.py | 4 +- tests/bench/lookup.py | 4 +- tests/common/plan_differ.py | 10 +- tests/common/utils.py | 2 +- tests/data/nba.ngql | 20 +- tests/data/nba/like.csv | 6 +- tests/data/nba/player.csv | 2 +- tests/data/nba/serve.csv | 12 +- tests/job/test_session.py | 2 +- tests/query/stateless/test_update.py | 8 +- .../features/bugfix/MatchUsedInPipe.feature | 4 +- .../bugfix/SubgraphBeforePipe.feature | 48 +- .../delete/DeleteVertex.IntVid.feature | 2 +- tests/tck/features/expression/Case.feature | 16 +- .../features/expression/FunctionCall.feature | 16 +- .../tck/features/expression/Predicate.feature | 4 +- .../expression/RelationalExpr.feature | 14 +- .../tck/features/expression/UnaryExpr.feature | 4 +- .../fetch/FetchVertices.intVid.feature | 6 +- .../fetch/FetchVertices.strVid.feature | 28 +- tests/tck/features/geo/GeoBase.feature | 6 +- tests/tck/features/go/GO.IntVid.feature | 44 +- tests/tck/features/go/GO.feature | 44 +- .../tck/features/go/GoYieldVertexEdge.feature | 62 +- .../tck/features/insert/Insert.IntVid.feature | 2 +- tests/tck/features/insert/Insert.feature | 2 +- tests/tck/features/lookup/ByIndex.feature | 100 +- .../features/lookup/ByIndex.intVid.feature | 100 +- tests/tck/features/lookup/Output.feature | 4 +- .../tck/features/lookup/Output.intVid.feature | 4 +- .../features/lookup/TagIndexFullScan.feature | 2 +- tests/tck/features/match/Base.IntVid.feature | 2 +- tests/tck/features/match/Base.feature | 14 +- .../features/match/MatchById.IntVid.feature | 14 +- tests/tck/features/match/MatchById.feature | 14 +- tests/tck/features/match/MatchGroupBy.feature | 34 +- .../features/match/PipeAndVariable.feature | 4 +- tests/tck/features/match/SeekByEdge.feature | 88 +- .../match/VariableLengthPattern.feature | 4 +- .../VariableLengthPattern.intVid.feature | 4 +- tests/tck/features/match/With.feature | 6 +- tests/tck/features/match/ZeroStep.feature | 20 +- .../features/match/ZeroStep.intVid.feature | 20 +- 
.../features/optimizer/IndexScanRule.feature | 8 +- .../PushFilterDownProjectRule.feature | 18 +- .../tck/features/path/AllPath.IntVid.feature | 24 +- tests/tck/features/path/AllPath.feature | 24 +- .../features/path/ShortestPath.IntVid.feature | 180 +-- tests/tck/features/path/ShortestPath.feature | 180 +-- tests/tck/features/schema/Comment.feature | 6 +- tests/tck/features/schema/Schema.feature | 30 +- .../features/subgraph/subgraph.IntVid.feature | 826 +++++------ tests/tck/features/subgraph/subgraph.feature | 780 +++++------ .../tck/features/update/Update.IntVid.feature | 2 +- tests/tck/features/update/Update.feature | 4 +- .../VerifyClientVersion.feature | 2 +- tests/tck/job/Job.feature | 2 +- .../features/expressions/map/Map1.feature | 2 +- third-party/install-gcc.sh | 2 +- third-party/install-third-party.sh | 2 +- 302 files changed, 3649 insertions(+), 2118 deletions(-) create mode 100644 src/graph/executor/admin/BalanceLeadersExecutor.h create mode 100644 src/meta/processors/admin/Balancer.cpp create mode 100644 src/meta/processors/admin/Balancer.h diff --git a/.linters/cpp/checkKeyword.py b/.linters/cpp/checkKeyword.py index af8916cf1a9..11ef9ea941d 100755 --- a/.linters/cpp/checkKeyword.py +++ b/.linters/cpp/checkKeyword.py @@ -8,7 +8,7 @@ import re import sys -PASER_FILE_PATH = 'src/parser/parser.yy' +PARSER_FILE_PATH = 'src/parser/parser.yy' SCANNER_FILE_PATH = 'src/parser/scanner.lex' reserved_key_words = [ @@ -139,7 +139,7 @@ def get_unreserved_keyword(file_path): if len(keywords) == 0: exit(0) - unreserved_key_words = get_unreserved_keyword(PASER_FILE_PATH) + unreserved_key_words = get_unreserved_keyword(PARSER_FILE_PATH) new_key_words = [word for word in keywords if word not in reserved_key_words] if len(new_key_words) == 0: exit(0) diff --git a/.linters/cpp/cpplint.py b/.linters/cpp/cpplint.py index 11a13acf48d..2d53e233bbb 100755 --- a/.linters/cpp/cpplint.py +++ b/.linters/cpp/cpplint.py @@ -641,7 +641,7 @@ # Files to exclude from linting. 
This is set by the --exclude flag. _excludes = None -# Whether to supress PrintInfo messages +# Whether to suppress PrintInfo messages _quiet = False # The allowed line length of files. @@ -752,7 +752,7 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error): 'Unknown NOLINT error category: %s' % category) -def ProcessGlobalSuppresions(lines): +def ProcessGlobalSuppressions(lines): """Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. @@ -780,7 +780,7 @@ def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by - ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. + ParseNolintSuppressions/ProcessGlobalSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. @@ -1013,7 +1013,7 @@ def __init__(self): self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? self.errors_by_category = {} # string to int dict storing error counts - self.quiet = False # Suppress non-error messagess? + self.quiet = False # Suppress non-error messages? 
# output format: # "emacs" - format that emacs can parse (default) @@ -6202,7 +6202,7 @@ def ProcessFileData(filename, file_extension, lines, error, ResetNolintSuppressions() CheckForCopyright(filename, lines, error) - ProcessGlobalSuppresions(lines) + ProcessGlobalSuppressions(lines) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) diff --git a/cmake/ThriftGenerate.cmake b/cmake/ThriftGenerate.cmake index 8453a9fcb7a..d82af8ef678 100644 --- a/cmake/ThriftGenerate.cmake +++ b/cmake/ThriftGenerate.cmake @@ -32,7 +32,7 @@ endmacro() # @output_path - The directory where the thrift file lives # # Output: -# file-cpp2-target - A custom target to add a dependenct +# file-cpp2-target - A custom target to add a dependency # ${file-cpp2-HEADERS} - The generated Header Files. # ${file-cpp2-SOURCES} - The generated Source Files. # diff --git a/conf/nebula-graphd.conf.default b/conf/nebula-graphd.conf.default index 43792df94c3..fc2432a20f3 100644 --- a/conf/nebula-graphd.conf.default +++ b/conf/nebula-graphd.conf.default @@ -7,7 +7,7 @@ --enable_optimizer=true # The default charset when a space is created --default_charset=utf8 -# The defaule collate when a space is created +# The default collate when a space is created --default_collate=utf8_bin # Whether to use the configuration obtained from the configuration file --local_config=true diff --git a/package/package.sh b/package/package.sh index bab842051e0..23abc71482f 100755 --- a/package/package.sh +++ b/package/package.sh @@ -183,7 +183,7 @@ function _find_dump_syms_tool { function _strip_unnecessary_binaries { for bin in $(ls -1 -F ${build_dir}/bin/ | grep -v [/$] | sed -e '/nebula-metad/d;/nebula-graphd/d;/nebula-storaged/d'); do if ! (strip ${build_dir}/bin/${bin}); then - echo ">>> strip ${bin} faild: $?. <<<" + echo ">>> strip ${bin} failed: $?. <<<" exit 1 fi done @@ -203,7 +203,7 @@ function dump_syms { for bin in nebula-graphd nebula-storaged nebula-metad; do if ! 
(${dump_syms} ${build_dir}/bin/${bin} > ${syms_dir}/${bin}${ver}.sym); then - echo ">>> dump ${bin} symbols faild: $?. <<<" + echo ">>> dump ${bin} symbols failed: $?. <<<" exit 1 fi done diff --git a/resources/gflags.json b/resources/gflags.json index 48683f75048..e75f960e71a 100644 --- a/resources/gflags.json +++ b/resources/gflags.json @@ -4,7 +4,7 @@ "v", "heartbeat_interval_secs", "meta_client_retry_times", - "slow_op_threshhold_ms", + "slow_op_threshold_ms", "clean_wal_interval_secs", "wal_ttl", "clean_wal_interval_secs", diff --git a/scripts/meta-transfer-tools.sh b/scripts/meta-transfer-tools.sh index 3bb316d47af..6e773ab9493 100755 --- a/scripts/meta-transfer-tools.sh +++ b/scripts/meta-transfer-tools.sh @@ -16,7 +16,7 @@ Usage="this tool is a simple wrapper to scp to copy local folder of metad to ano 1. -f (from) local metad dir \n \ 2. -t (to) remote destination \n \ 3. -u (update) any configs need to be changed \n \ - different configs should be seperated by ':' \n \ + different configs should be separated by ':' \n \ each config has to be the form of "local_ip=172.0.0.1" \n \ \n \ for example \n \ diff --git a/scripts/nebula.service b/scripts/nebula.service index 64739bb20aa..04f38c074f6 100755 --- a/scripts/nebula.service +++ b/scripts/nebula.service @@ -18,7 +18,7 @@ SCRIPT_PATH=$(readlink -f $0) # Directory of this script SCRIPT_DIR=$(dirname ${SCRIPT_PATH}) # Installation directory, i.e. parent of SCRIPT_DIR -# For now we assume that the directory hierachy of the nebula installation is: +# For now we assume that the directory hierarchy of the nebula installation is: # root/bin, root/etc/, root/scripts, etc. INSTALL_ROOT=$(cd ${SCRIPT_DIR}/.. 
&>/dev/null; pwd) UTILS_PATH=${SCRIPT_DIR}/utils.sh diff --git a/src/clients/meta/FileBasedClusterIdMan.cpp b/src/clients/meta/FileBasedClusterIdMan.cpp index 15212fbc030..7043729cf9e 100644 --- a/src/clients/meta/FileBasedClusterIdMan.cpp +++ b/src/clients/meta/FileBasedClusterIdMan.cpp @@ -31,7 +31,7 @@ bool FileBasedClusterIdMan::persistInFile(ClusterID clusterId, const std::string ::close(fd); return false; } - LOG(INFO) << "Persiste clusterId " << clusterId << " succeeded!"; + LOG(INFO) << "Persist clusterId " << clusterId << " succeeded!"; ::close(fd); return true; } diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp index 8953947e0cb..21880d788ae 100644 --- a/src/clients/meta/MetaClient.cpp +++ b/src/clients/meta/MetaClient.cpp @@ -134,7 +134,7 @@ void MetaClient::heartBeatThreadFunc() { return; } - // if MetaServer has some changes, refesh the localCache_ + // if MetaServer has some changes, refresh the localCache_ loadData(); loadCfg(); } @@ -799,7 +799,7 @@ Status MetaClient::handleResponse(const RESP& resp) { case nebula::cpp2::ErrorCode::E_CONFLICT: return Status::Error("Conflict!"); case nebula::cpp2::ErrorCode::E_INVALID_PARM: - return Status::Error("Invalid parm!"); + return Status::Error("Invalid param!"); case nebula::cpp2::ErrorCode::E_WRONGCLUSTER: return Status::Error("Wrong cluster!"); case nebula::cpp2::ErrorCode::E_STORE_FAILURE: @@ -814,8 +814,8 @@ Status MetaClient::handleResponse(const RESP& resp) { return Status::Error("No running balance plan!"); case nebula::cpp2::ErrorCode::E_NO_VALID_HOST: return Status::Error("No valid host hold the partition!"); - case nebula::cpp2::ErrorCode::E_CORRUPTTED_BALANCE_PLAN: - return Status::Error("No corrupted blance plan!"); + case nebula::cpp2::ErrorCode::E_CORRUPTED_BALANCE_PLAN: + return Status::Error("No corrupted balance plan!"); case nebula::cpp2::ErrorCode::E_INVALID_PASSWORD: return Status::Error("Invalid password!"); case nebula::cpp2::ErrorCode::E_IMPROPER_ROLE: @@ 
-2424,7 +2424,7 @@ folly::Future> MetaClient::heartbeat() { [](auto client, auto request) { return client->future_heartBeat(request); }, [this](cpp2::HBResp&& resp) -> bool { if (options_.role_ == cpp2::HostRole::STORAGE && options_.clusterId_.load() == 0) { - LOG(INFO) << "Persisit the cluster Id from metad " << resp.get_cluster_id(); + LOG(INFO) << "Persist the cluster Id from metad " << resp.get_cluster_id(); if (FileBasedClusterIdMan::persistInFile(resp.get_cluster_id(), FLAGS_cluster_id_path)) { options_.clusterId_.store(resp.get_cluster_id()); } else { diff --git a/src/clients/meta/MetaClient.h b/src/clients/meta/MetaClient.h index a80204f9609..4e19f2e79ee 100644 --- a/src/clients/meta/MetaClient.h +++ b/src/clients/meta/MetaClient.h @@ -139,7 +139,7 @@ using UserRolesMap = std::unordered_map // get user password by account using UserPasswordMap = std::unordered_map; -// config cahce, get config via module and name +// config cache, get config via module and name using MetaConfigMap = std::unordered_map, cpp2::ConfigItem>; @@ -186,7 +186,7 @@ struct MetaClientOptions { // Current host address HostAddr localHost_{"", 0}; - // Current cluster Id, it is requried by storaged only. + // Current cluster Id, it is required by storaged only. std::atomic clusterId_{0}; // If current client being used in storaged. bool inStoraged_ = false; @@ -410,7 +410,7 @@ class MetaClient { folly::Future>> listSnapshots(); - // Opeartions for listener. + // Operations for listener. folly::Future> addListener(GraphSpaceID spaceId, cpp2::ListenerType type, @@ -442,7 +442,7 @@ class MetaClient { StatusOr> getFTClientsFromCache(); - // Opeartions for fulltext index. + // Operations for fulltext index. folly::Future> createFTIndex(const std::string& name, const cpp2::FTIndex& index); @@ -477,7 +477,7 @@ class MetaClient { folly::Future> killQuery( std::unordered_map> killQueries); - // Opeartions for cache. + // Operations for cache. 
StatusOr getSpaceIdByNameFromCache(const std::string& name); StatusOr getSpaceNameByIdFromCache(GraphSpaceID spaceId); @@ -504,7 +504,7 @@ class MetaClient { StatusOr getEdgeNameByTypeFromCache(const GraphSpaceID& space, const EdgeType edgeType); - // get all lastest version edge + // get all latest version edge StatusOr> getAllEdgeFromCache(const GraphSpaceID& space); PartsMap getPartsMapFromCache(const HostAddr& host); diff --git a/src/clients/storage/GraphStorageClient.cpp b/src/clients/storage/GraphStorageClient.cpp index 206502e10e8..85af874cc19 100644 --- a/src/clients/storage/GraphStorageClient.cpp +++ b/src/clients/storage/GraphStorageClient.cpp @@ -635,7 +635,7 @@ StatusOr> GraphStorageClient::getIdFr if (vidType == PropertyType::INT64) { if (isEdgeProps) { cb = [](const Row& r) -> const VertexID& { - // The first column has to be the src, the thrid column has to be the + // The first column has to be the src, the third column has to be the // dst DCHECK_EQ(Value::Type::INT, r.values[0].type()); DCHECK_EQ(Value::Type::INT, r.values[3].type()); diff --git a/src/clients/storage/StorageClientBase.h b/src/clients/storage/StorageClientBase.h index a5bc2054114..8ee65d321ae 100644 --- a/src/clients/storage/StorageClientBase.h +++ b/src/clients/storage/StorageClientBase.h @@ -60,7 +60,7 @@ class StorageRpcResponse final { ++failedReqs_; } - // A value between [0, 100], representing a precentage + // A value between [0, 100], representing a percentage int32_t completeness() const { std::lock_guard g(*lock_); DCHECK_NE(totalReqsSent_, 0); diff --git a/src/codec/RowReaderV1.cpp b/src/codec/RowReaderV1.cpp index bcbfcd7d97f..84085845b2e 100644 --- a/src/codec/RowReaderV1.cpp +++ b/src/codec/RowReaderV1.cpp @@ -440,7 +440,7 @@ Value RowReaderV1::getVid(int64_t index) const noexcept { /************************************************************ * - * Low -level functions to read from the bytes + * Low-level functions to read from the bytes * 
***********************************************************/ int32_t RowReaderV1::readInteger(int64_t offset, int64_t& v) const noexcept { diff --git a/src/codec/RowReaderV1.h b/src/codec/RowReaderV1.h index 55a59bc2bef..22760da1982 100644 --- a/src/codec/RowReaderV1.h +++ b/src/codec/RowReaderV1.h @@ -41,7 +41,7 @@ class RowReaderV1 : public RowReader { private: int32_t headerLen_ = 0; int32_t numBytesForOffset_ = 0; - // Block offet value is composed by two integers. The first one is + // Block offset value is composed by two integers. The first one is // the block offset, the second one is the largest index being visited // in the block. This index is zero-based mutable std::vector> blockOffsets_; @@ -53,7 +53,7 @@ class RowReaderV1 : public RowReader { private: RowReaderV1() = default; - // Process the row header infomation + // Process the row header information // Returns false when the row data is invalid bool processHeader(folly::StringPiece row); @@ -71,7 +71,7 @@ class RowReaderV1 : public RowReader { int64_t skipToNext(int64_t index, int64_t offset) const noexcept; // Skip to the {index}Th field - // The method retuns the offset of the field + // The method returns the offset of the field // It returns a negative number when the data corrupts int64_t skipToField(int64_t index) const noexcept; diff --git a/src/codec/RowWriterV2.cpp b/src/codec/RowWriterV2.cpp index d5b1d0e74d0..19378235fc5 100644 --- a/src/codec/RowWriterV2.cpp +++ b/src/codec/RowWriterV2.cpp @@ -57,7 +57,7 @@ RowWriterV2::RowWriterV2(const meta::SchemaProviderIf* schema) header = 0x0E; // 0x08 | 0x06, six bytes for the schema version headerLen_ = 7; } else if (ver < 0x00FFFFFFFFFFFFFF) { - header = 0x0F; // 0x08 | 0x07, severn bytes for the schema version + header = 0x0F; // 0x08 | 0x07, seven bytes for the schema version headerLen_ = 8; } else { LOG(FATAL) << "Schema version too big"; diff --git a/src/codec/RowWriterV2.h b/src/codec/RowWriterV2.h index 6b45a76550f..151c0f9dc6e 100644 
--- a/src/codec/RowWriterV2.h +++ b/src/codec/RowWriterV2.h @@ -35,7 +35,7 @@ enum class WriteResult { Version 1: v v v 0 0 b b b In version 1, the middle two bits are always zeros. The left three bits - indicats the number of bytes used for the schema version, while the right + indicates the number of bytes used for the schema version, while the right three bits indicates the number of bytes used for the block offsets Version 2: @@ -144,7 +144,7 @@ class RowWriterV2 { const meta::SchemaProviderIf* schema_; std::string buf_; std::vector isSet_; - // Ther number of bytes ocupied by header and the schema version + // The number of bytes occupied by header and the schema version size_t headerLen_; size_t numNullBytes_; size_t approxStrLen_; diff --git a/src/codec/test/RowReaderBenchmark.cpp b/src/codec/test/RowReaderBenchmark.cpp index dc651814f71..54eaa6d889b 100644 --- a/src/codec/test/RowReaderBenchmark.cpp +++ b/src/codec/test/RowReaderBenchmark.cpp @@ -152,7 +152,7 @@ void randomTest(SchemaWriter* schema, } /************************* - * Begining of Tests + * Beginning of Tests ************************/ TEST(RowReader, SequentialShort) { sequentialTest(&schemaShort, dataShortV1, dataShortV2); } @@ -166,7 +166,7 @@ TEST(RowReader, RandomLong) { randomTest(&schemaLong, dataLongV1, dataLongV2, lo ************************/ /************************* - * Begining of benchmarks + * Beginning of benchmarks ************************/ BENCHMARK(seq_read_short_v1, iters) { sequentialRead(&schemaShort, dataShortV1, iters); } BENCHMARK_RELATIVE(seq_read_short_v2, iters) { sequentialRead(&schemaShort, dataShortV2, iters); } diff --git a/src/codec/test/RowWriterBenchmark.cpp b/src/codec/test/RowWriterBenchmark.cpp index 94b4a86985f..ae35c6156ec 100644 --- a/src/codec/test/RowWriterBenchmark.cpp +++ b/src/codec/test/RowWriterBenchmark.cpp @@ -64,7 +64,7 @@ void writeDataV2(SchemaWriter* schema, int32_t iters) { } /************************* - * Begining of benchmarks + * 
Beginning of benchmarks ************************/ BENCHMARK(WriteShortRowV1, iters) { writeDataV1(&schemaShort, iters); } diff --git a/src/common/base/ConcurrentLRUCache.h b/src/common/base/ConcurrentLRUCache.h index 775d175f713..7893039319e 100644 --- a/src/common/base/ConcurrentLRUCache.h +++ b/src/common/base/ConcurrentLRUCache.h @@ -159,7 +159,7 @@ class ConcurrentLRUCache final { /** It is copied from boost::compute::detail::LRU. The differences: - 1. Add methed evict(const K& key); + 1. Add method evict(const K& key); 2. Instead std::map with std::unordered_map 3. Update the code style. 4. Add stats diff --git a/src/common/base/SignalHandler.h b/src/common/base/SignalHandler.h index 5950eacec6c..5e639d75214 100644 --- a/src/common/base/SignalHandler.h +++ b/src/common/base/SignalHandler.h @@ -12,7 +12,7 @@ #include "common/base/Status.h" /** - * SignalHandler is a singleton to do the basic signal hanling, + * SignalHandler is a singleton to do the basic signal handling, * mainly used in a daemon executable. * * By default, it ignores SIGPIPE and SIGHUP as we usually do. @@ -32,7 +32,7 @@ class SignalHandler final { /** * To install one or several signals to handle. - * Upon any signal arrives, the cooresponding handler would be invoked, + * Upon any signal arrives, the corresponding handler would be invoked, * with an argument holding the informations about the signal and the sender. * The handler typically prints out the info and do some other things, * e.g. stop the process on SIGTERM. 
diff --git a/src/common/base/SlowOpTracker.cpp b/src/common/base/SlowOpTracker.cpp index debdaa048c2..0986643d9fc 100644 --- a/src/common/base/SlowOpTracker.cpp +++ b/src/common/base/SlowOpTracker.cpp @@ -8,4 +8,4 @@ #include "common/base/Base.h" #include "common/time/WallClock.h" -DEFINE_int64(slow_op_threshhold_ms, 100, "default threshhold for slow operation"); +DEFINE_int64(slow_op_threshold_ms, 100, "default threshold for slow operation"); diff --git a/src/common/base/SlowOpTracker.h b/src/common/base/SlowOpTracker.h index 5d50628cf25..63c86597298 100644 --- a/src/common/base/SlowOpTracker.h +++ b/src/common/base/SlowOpTracker.h @@ -9,7 +9,7 @@ #include "common/base/Base.h" #include "common/time/WallClock.h" -DECLARE_int64(slow_op_threshhold_ms); +DECLARE_int64(slow_op_threshold_ms); namespace nebula { @@ -19,12 +19,12 @@ class SlowOpTracker { ~SlowOpTracker() = default; - bool slow(int64_t threshhold = 0) { + bool slow(int64_t threshold = 0) { dur_ = time::WallClock::fastNowInMilliSec() - startMs_; if (dur_ < 0) { dur_ = 0; } - return threshhold > 0 ? dur_ > threshhold : dur_ > FLAGS_slow_op_threshhold_ms; + return threshold > 0 ? dur_ > threshold : dur_ > FLAGS_slow_op_threshold_ms; } void output(const std::string& prefix, const std::string& msg) { diff --git a/src/common/base/Status.h b/src/common/base/Status.h index 60e74fcbf2b..de7c9dbc471 100644 --- a/src/common/base/Status.h +++ b/src/common/base/Status.h @@ -136,7 +136,7 @@ class Status final { // If some kind of error really needs to be distinguished with others using a // specific code, other than a general code and specific msg, you could add a - // new code below, e.g. kSomeError, and add the cooresponding + // new code below, e.g. 
kSomeError, and add the corresponding // STATUS_GENERATOR(SomeError) enum Code : uint16_t { // OK diff --git a/src/common/base/StatusOr.h b/src/common/base/StatusOr.h index 6b668402ab3..3962f34642c 100644 --- a/src/common/base/StatusOr.h +++ b/src/common/base/StatusOr.h @@ -63,7 +63,7 @@ class StatusOr final { // `StatusOr' contains neither a Status nor a value // in the default-constructed case. // From the semantics aspect, it must have been associated with - // a Status or value eventualy before being used. + // a Status or value eventually before being used. StatusOr() { state_ = kVoid; } // Destruct the `Status' or value if it's holding one. @@ -172,7 +172,7 @@ class StatusOr final { return *this; } - // Move assigment operator from a rvalue of `StatusOr' + // Move assignment operator from a rvalue of `StatusOr' template >> StatusOr &operator=(StatusOr &&rhs) noexcept { reset(); @@ -190,7 +190,7 @@ class StatusOr final { return *this; } - // Move assigment operator from a rvalue of any compatible type with `T' + // Move assignment operator from a rvalue of any compatible type with `T' template >> StatusOr &operator=(U &&value) noexcept { destruct(); @@ -236,7 +236,7 @@ class StatusOr final { } // Return the non-const lvalue reference to the associated value - // `ok()' is DCHECKed + // `ok()' is DCHECK'd T &value() & { DCHECK(ok()); return variant_.value_; diff --git a/src/common/base/StringUnorderedMap.h b/src/common/base/StringUnorderedMap.h index cdedae10036..081e4d68786 100644 --- a/src/common/base/StringUnorderedMap.h +++ b/src/common/base/StringUnorderedMap.h @@ -41,7 +41,7 @@ class StringUnorderedMap { /****************************************** * - * Assignmets + * Assignments * *****************************************/ StringUnorderedMap& operator=(const StringUnorderedMap& other); diff --git a/src/common/conf/Configuration.cpp b/src/common/conf/Configuration.cpp index a250307d868..c5d3d7ba0e8 100644 --- a/src/common/conf/Configuration.cpp +++ 
b/src/common/conf/Configuration.cpp @@ -180,7 +180,7 @@ Status Configuration::fetchAsIntArray(const char *key, std::vector &val try { val.emplace_back(entry.asInt()); } catch (const std::exception &ex) { - // Avoid format sercure by literal + // Avoid format secure by literal return Status::Error("%s", ex.what()); } } @@ -201,7 +201,7 @@ Status Configuration::fetchAsDoubleArray(const char *key, std::vector &v try { val.emplace_back(entry.asDouble()); } catch (const std::exception &ex) { - // Avoid format sercure by literal + // Avoid format secure by literal return Status::Error("%s", ex.what()); } } @@ -222,7 +222,7 @@ Status Configuration::fetchAsBoolArray(const char *key, std::vector &val) try { val.emplace_back(entry.asBool()); } catch (const std::exception &ex) { - // Avoid format sercure by literal + // Avoid format secure by literal return Status::Error("%s", ex.what()); } } @@ -243,7 +243,7 @@ Status Configuration::fetchAsStringArray(const char *key, std::vector proces try { processor(key.asString()); } catch (const std::exception &ex) { - // Avoid format sercure by literal + // Avoid format secure by literal return Status::Error("%s", ex.what()); } } @@ -270,7 +270,7 @@ Status Configuration::forEachItem( try { processor(item.first.asString(), item.second); } catch (const std::exception &ex) { - // Avoid format sercure by literal + // Avoid format secure by literal return Status::Error("%s", ex.what()); } } diff --git a/src/common/conf/test/ConfigurationTest.cpp b/src/common/conf/test/ConfigurationTest.cpp index 10043fe1a4f..6ed00c7d145 100644 --- a/src/common/conf/test/ConfigurationTest.cpp +++ b/src/common/conf/test/ConfigurationTest.cpp @@ -63,7 +63,7 @@ TEST(Configuration, Basic) { } { bool val; - status = conf.fetchAsBool("nonexist", val); + status = conf.fetchAsBool("nonexistent", val); ASSERT_FALSE(status.ok()) << status.toString(); } } diff --git a/src/common/datatypes/Date.h b/src/common/datatypes/Date.h index 5e6087e8a4b..80b58243bb6 100644 --- 
a/src/common/datatypes/Date.h +++ b/src/common/datatypes/Date.h @@ -12,7 +12,7 @@ namespace nebula { -// In nebula only store UTC time, and the interpretion of time value based on +// In nebula only store UTC time, and the interpretation of time value based on // the timezone configuration in current system. extern const int64_t kDaysSoFar[]; diff --git a/src/common/datatypes/test/CMakeLists.txt b/src/common/datatypes/test/CMakeLists.txt index 2ec47fd1962..e2feb822850 100644 --- a/src/common/datatypes/test/CMakeLists.txt +++ b/src/common/datatypes/test/CMakeLists.txt @@ -54,7 +54,7 @@ nebula_add_test( nebula_add_test( NAME - structral_value_test + structural_value_test SOURCES PathTest.cpp EdgeTest.cpp diff --git a/src/common/datatypes/test/ValueTest.cpp b/src/common/datatypes/test/ValueTest.cpp index 8a89ad2422d..b27253e935d 100644 --- a/src/common/datatypes/test/ValueTest.cpp +++ b/src/common/datatypes/test/ValueTest.cpp @@ -627,7 +627,7 @@ TEST(Value, TypeCast) { Value vNull(NullType::__NULL__); Value vIntMin(std::numeric_limits::min()); Value vIntMax(std::numeric_limits::max()); - Value vFloatMin(std::numeric_limits::lowest()); // non-negtive + Value vFloatMin(std::numeric_limits::lowest()); // non-negative Value vFloatMax(std::numeric_limits::max()); { @@ -720,7 +720,7 @@ TEST(Value, TypeCast) { EXPECT_EQ(Value::Type::FLOAT, vf.type()); EXPECT_EQ(vf.getFloat(), std::numeric_limits::max()); - // Invlaid string + // Invalid string vf = Value("12abc").toFloat(); EXPECT_EQ(Value::kNullValue, vf); @@ -788,7 +788,7 @@ TEST(Value, TypeCast) { vi = Value("-9223372036854775809").toInt(); EXPECT_EQ(Value::kNullOverflow, vi); - // Invlaid string + // Invalid string vi = Value("12abc").toInt(); EXPECT_EQ(Value::kNullValue, vi); @@ -1071,7 +1071,7 @@ TEST(Value, DecodeEncode) { // time Value(Time{1, 2, 3, 4}), - // datatime + // datetime Value(DateTime{1, 2, 3, 4, 5, 6, 7}), // vertex diff --git a/src/common/datatypes/test/ValueToJsonTest.cpp 
b/src/common/datatypes/test/ValueToJsonTest.cpp index fa20b9bf5b9..a35220fc78a 100644 --- a/src/common/datatypes/test/ValueToJsonTest.cpp +++ b/src/common/datatypes/test/ValueToJsonTest.cpp @@ -52,20 +52,20 @@ TEST(ValueToJson, vertex) { tag2, }})); { - dynamic expectedVeretxJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( + dynamic expectedVertexJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( "tagName1.prop2", nullptr)("tagName1.prop3", "123"); - ASSERT_EQ(expectedVeretxJson, vertexStrVid.toJson()); + ASSERT_EQ(expectedVertexJson, vertexStrVid.toJson()); - dynamic expectedVeretxMetaJson = dynamic::object("id", "Vid")("type", "vertex"); - ASSERT_EQ(expectedVeretxMetaJson, vertexStrVid.getMetaData()); + dynamic expectedVertexMetaJson = dynamic::object("id", "Vid")("type", "vertex"); + ASSERT_EQ(expectedVertexMetaJson, vertexStrVid.getMetaData()); } { - dynamic expectedVeretxJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( + dynamic expectedVertexJson = dynamic::object("tagName.prop", 2)("tagName1.prop1", 2)( "tagName1.prop2", nullptr)("tagName1.prop3", "123"); - ASSERT_EQ(expectedVeretxJson, vertexIntVid.toJson()); + ASSERT_EQ(expectedVertexJson, vertexIntVid.toJson()); - dynamic expectedVeretxMetaJson = dynamic::object("id", 001)("type", "vertex"); - ASSERT_EQ(expectedVeretxMetaJson, vertexIntVid.getMetaData()); + dynamic expectedVertexMetaJson = dynamic::object("id", 001)("type", "vertex"); + ASSERT_EQ(expectedVertexMetaJson, vertexIntVid.getMetaData()); } } @@ -159,7 +159,7 @@ TEST(ValueToJson, Set) { DateTime(2021, 12, 21, 13, 30, 15, 0)})); // datetime dynamic expectedSetJsonObj = dynamic::array( 2, 2.33, true, "str", "2021-12-21", "13:30:15.000000Z", "2021-12-21T13:30:15.0Z"); - // The underlying data strcuture is unordered_set, so sort before the comparison + // The underlying data structure is unordered_set, so sort before the comparison auto actualJson = set.toJson(); std::sort(actualJson.begin(), 
actualJson.end()); std::sort(expectedSetJsonObj.begin(), expectedSetJsonObj.end()); @@ -246,7 +246,7 @@ TEST(ValueToJson, DecodeEncode) { // time Value(Time{1, 2, 3, 4}), - // datatime + // datetime Value(DateTime{1, 2, 3, 4, 5, 6, 7}), // vertex diff --git a/src/common/expression/Expression.cpp b/src/common/expression/Expression.cpp index 113e74be259..01e8388c848 100644 --- a/src/common/expression/Expression.cpp +++ b/src/common/expression/Expression.cpp @@ -560,7 +560,7 @@ std::ostream& operator<<(std::ostream& os, Expression::Kind kind) { os << "Equal"; break; case Expression::Kind::kRelNE: - os << "NotEuqal"; + os << "NotEqual"; break; case Expression::Kind::kRelLT: os << "LessThan"; diff --git a/src/common/expression/LabelAttributeExpression.h b/src/common/expression/LabelAttributeExpression.h index 633797350a6..809b3c44a5b 100644 --- a/src/common/expression/LabelAttributeExpression.h +++ b/src/common/expression/LabelAttributeExpression.h @@ -67,11 +67,11 @@ class LabelAttributeExpression final : public Expression { } void writeTo(Encoder&) const override { - LOG(FATAL) << "LabelAttributeExpression not supporte to encode."; + LOG(FATAL) << "LabelAttributeExpression not supported to encode."; } void resetFrom(Decoder&) override { - LOG(FATAL) << "LabelAttributeExpression not supporte to decode."; + LOG(FATAL) << "LabelAttributeExpression not supported to decode."; } private: diff --git a/src/common/expression/PropertyExpression.h b/src/common/expression/PropertyExpression.h index f18f2831af6..4f8c86f34e4 100644 --- a/src/common/expression/PropertyExpression.h +++ b/src/common/expression/PropertyExpression.h @@ -16,7 +16,7 @@ constexpr char const kSrcRef[] = "$^"; constexpr char const kDstRef[] = "$$"; // Base abstract expression of getting properties. -// An expresion of getting props is consisted with 3 parts: +// An expression of getting props is consisted with 3 parts: // 1. reference, e.g. $-, $, $^, $$ // 2. symbol, a symbol name, e.g. 
tag_name, edge_name, variable_name, // 3. property, property name. diff --git a/src/common/expression/VariableExpression.h b/src/common/expression/VariableExpression.h index 245e60e9cae..23210860c85 100644 --- a/src/common/expression/VariableExpression.h +++ b/src/common/expression/VariableExpression.h @@ -92,11 +92,11 @@ class VersionedVariableExpression final : public Expression { : Expression(pool, Kind::kVersionedVar), var_(var), version_(version) {} void writeTo(Encoder&) const override { - LOG(FATAL) << "VersionedVairableExpression not support to encode."; + LOG(FATAL) << "VersionedVariableExpression not support to encode."; } void resetFrom(Decoder&) override { - LOG(FATAL) << "VersionedVairableExpression not support to decode."; + LOG(FATAL) << "VersionedVariableExpression not support to decode."; } private: diff --git a/src/common/expression/test/ConstantExpressionTest.cpp b/src/common/expression/test/ConstantExpressionTest.cpp index 55eecb27e6d..e20a0558942 100644 --- a/src/common/expression/test/ConstantExpressionTest.cpp +++ b/src/common/expression/test/ConstantExpressionTest.cpp @@ -16,8 +16,8 @@ TEST_F(ExpressionTest, Constant) { EXPECT_EQ(eval, 1); } { - auto doubl = ConstantExpression::make(&pool, 1.0); - auto eval = Expression::eval(doubl, gExpCtxt); + auto double_ = ConstantExpression::make(&pool, 1.0); + auto eval = Expression::eval(double_, gExpCtxt); EXPECT_EQ(eval.type(), Value::Type::FLOAT); EXPECT_EQ(eval, 1.0); } diff --git a/src/common/expression/test/ExpressionBenchmark.cpp b/src/common/expression/test/ExpressionBenchmark.cpp index 45f8eec2b5c..899250b4cea 100644 --- a/src/common/expression/test/ExpressionBenchmark.cpp +++ b/src/common/expression/test/ExpressionBenchmark.cpp @@ -197,7 +197,7 @@ getEdgeProp(ger_edge_prop_int) 51.88ns 19.27M getEdgeProp(ger_edge_prop_string) 88.41ns 11.31M ============================================================================ -The latest(2020/07/13) vesion of the implementation of expressions, +The 
latest(2020/07/13) version of the implementation of expressions, which return Value for getting edge props and src props in ExpressionContext. ============================================================================ ExpressionBenchmark.cpprelative time/iter iters/s diff --git a/src/common/expression/test/ExpressionContextMock.h b/src/common/expression/test/ExpressionContextMock.h index 9f91016d8fc..dda2d5369c0 100644 --- a/src/common/expression/test/ExpressionContextMock.h +++ b/src/common/expression/test/ExpressionContextMock.h @@ -112,7 +112,7 @@ class ExpressionContextMock final : public ExpressionContext { Value getColumn(int32_t index) const override; void setVar(const std::string& var, Value val) override { - // used by tests of list comprehesion, predicate or reduce + // used by tests of list comprehension, predicate or reduce if (var == "n" || var == "p" || var == "totalNum") { vals_.erase(var); vals_[var] = val; diff --git a/src/common/fs/FileUtils.h b/src/common/fs/FileUtils.h index b237ef7ec65..d87c59ec227 100644 --- a/src/common/fs/FileUtils.h +++ b/src/common/fs/FileUtils.h @@ -50,7 +50,7 @@ class FileUtils final { // Tell if stdin attached to a TTY static bool isStdinTTY(); - // Tell if stdout atached to a TTY + // Tell if stdout attached to a TTY static bool isStdoutTTY(); // Tell if stderr attached to a TTY static bool isStderrTTY(); diff --git a/src/common/fs/test/FileUtilsTest.cpp b/src/common/fs/test/FileUtilsTest.cpp index 9e15669d51e..adeff0e3351 100644 --- a/src/common/fs/test/FileUtilsTest.cpp +++ b/src/common/fs/test/FileUtilsTest.cpp @@ -161,7 +161,7 @@ TEST(FileUtils, removeDirRecursively) { fd = mkstemp(fileTemp); ASSERT_EQ(close(fd), 0); - // Recursively removal shold succeed + // Recursively removal should succeed EXPECT_TRUE(FileUtils::remove(dirTemp, true)); // Verify the directory is gone diff --git a/src/common/fs/test/TempFileTest.cpp b/src/common/fs/test/TempFileTest.cpp index 0014afb218b..9c64e6fe93d 100644 --- 
a/src/common/fs/test/TempFileTest.cpp +++ b/src/common/fs/test/TempFileTest.cpp @@ -12,7 +12,7 @@ namespace nebula { namespace fs { TEST(TempFile, Basic) { - // auto deletiong + // auto deletion { const char *path = "/tmp/tmp.XXXXXX"; std::string actual_path; diff --git a/src/common/function/FunctionManager.cpp b/src/common/function/FunctionManager.cpp index 097c3eba278..4bf3ac2cd2d 100644 --- a/src/common/function/FunctionManager.cpp +++ b/src/common/function/FunctionManager.cpp @@ -806,7 +806,7 @@ FunctionManager::FunctionManager() { }; } { - // return the mathmatical constant PI + // return the mathematical constant PI auto &attr = functions_["pi"]; attr.minArity_ = 0; attr.maxArity_ = 0; @@ -1644,7 +1644,7 @@ FunctionManager::FunctionManager() { } { auto &attr = functions_["date"]; - // 0 for corrent time + // 0 for current time // 1 for string or map attr.minArity_ = 0; attr.maxArity_ = 1; @@ -1716,7 +1716,7 @@ FunctionManager::FunctionManager() { } { auto &attr = functions_["datetime"]; - // 0 for corrent time + // 0 for current time // 1 for string or map attr.minArity_ = 0; attr.maxArity_ = 1; diff --git a/src/common/function/test/FunctionManagerTest.cpp b/src/common/function/test/FunctionManagerTest.cpp index 9305c788e59..a34a88e0d06 100644 --- a/src/common/function/test/FunctionManagerTest.cpp +++ b/src/common/function/test/FunctionManagerTest.cpp @@ -32,7 +32,7 @@ class FunctionManagerTest : public ::testing::Test { auto result = FunctionManager::get(expr, args.size()); if (!result.ok()) { return ::testing::AssertionFailure() - << "Can't get fuction " << expr << " with " << args.size() << " parameters."; + << "Can't get function " << expr << " with " << args.size() << " parameters."; } auto res = result.value()(argsRef); if (res.type() != expect.type()) { @@ -53,7 +53,7 @@ class FunctionManagerTest : public ::testing::Test { auto result = FunctionManager::get(expr, args.size()); if (!result.ok()) { return ::testing::AssertionFailure() - << "Can't get 
fuction " << expr << " with " << args.size() << " parameters."; + << "Can't get function " << expr << " with " << args.size() << " parameters."; } auto res = result.value()(argsRef); if (res.type() != expectType) { @@ -68,7 +68,7 @@ class FunctionManagerTest : public ::testing::Test { auto result = FunctionManager::get(expr, args.size()); if (!result.ok()) { return ::testing::AssertionFailure() - << "Can't get fuction " << expr << " with " << args.size() << " parameters."; + << "Can't get function " << expr << " with " << args.size() << " parameters."; } return ::testing::AssertionSuccess(); } @@ -1489,7 +1489,7 @@ TEST_F(FunctionManagerTest, returnType) { } } -TEST_F(FunctionManagerTest, SchemaReleated) { +TEST_F(FunctionManagerTest, SchemaRelated) { Vertex vertex; Edge edge; diff --git a/src/common/geo/GeoFunction.cpp b/src/common/geo/GeoFunction.cpp index bb058b7b04c..f1340af67bd 100644 --- a/src/common/geo/GeoFunction.cpp +++ b/src/common/geo/GeoFunction.cpp @@ -489,8 +489,8 @@ std::vector GeoFunction::coveringCellIds(const S2Region& r, double GeoFunction::distanceOfS2PolylineWithS2Point(const S2Polyline* aLine, const S2Point& bPoint) { int tmp; - S2Point cloestPointOnLine = aLine->Project(bPoint, &tmp); - return S2Earth::GetDistanceMeters(cloestPointOnLine, bPoint); + S2Point closestPointOnLine = aLine->Project(bPoint, &tmp); + return S2Earth::GetDistanceMeters(closestPointOnLine, bPoint); } double GeoFunction::distanceOfS2PolygonWithS2Polyline(const S2Polygon* aPolygon, diff --git a/src/common/geo/io/wkb/WKBWriter.cpp b/src/common/geo/io/wkb/WKBWriter.cpp index 611ac4bd682..d17572ef154 100644 --- a/src/common/geo/io/wkb/WKBWriter.cpp +++ b/src/common/geo/io/wkb/WKBWriter.cpp @@ -33,7 +33,7 @@ std::string WKBWriter::write(const Geography& geog, ByteOrder byteOrder) { } default: LOG(FATAL) - << "Geomtry shapes other than Point/LineString/Polygon are not currently supported"; + << "Geometry shapes other than Point/LineString/Polygon are not currently supported"; 
return ""; } } diff --git a/src/common/geo/io/wkt/WKTWriter.cpp b/src/common/geo/io/wkt/WKTWriter.cpp index ec750e566a0..ed925e8a777 100644 --- a/src/common/geo/io/wkt/WKTWriter.cpp +++ b/src/common/geo/io/wkt/WKTWriter.cpp @@ -45,7 +45,7 @@ std::string WKTWriter::write(const Geography& geog) const { } default: LOG(ERROR) - << "Geomtry shapes other than Point/LineString/Polygon are not currently supported"; + << "Geometry shapes other than Point/LineString/Polygon are not currently supported"; return ""; } } diff --git a/src/common/graph/Response.h b/src/common/graph/Response.h index 727c974f68b..51ad260240d 100644 --- a/src/common/graph/Response.h +++ b/src/common/graph/Response.h @@ -82,7 +82,7 @@ X(E_BALANCED, -2024) \ X(E_NO_RUNNING_BALANCE_PLAN, -2025) \ X(E_NO_VALID_HOST, -2026) \ - X(E_CORRUPTTED_BALANCE_PLAN, -2027) \ + X(E_CORRUPTED_BALANCE_PLAN, -2027) \ X(E_NO_INVALID_BALANCE_PLAN, -2028) \ \ /* Authentication Failure */ \ diff --git a/src/common/graph/tests/ResponseEncodeDecodeTest.cpp b/src/common/graph/tests/ResponseEncodeDecodeTest.cpp index a741f57dd3b..de61bc32de8 100644 --- a/src/common/graph/tests/ResponseEncodeDecodeTest.cpp +++ b/src/common/graph/tests/ResponseEncodeDecodeTest.cpp @@ -15,7 +15,7 @@ namespace nebula { using serializer = apache::thrift::CompactSerializer; -TEST(ResponseEncodDecodeTest, Basic) { +TEST(ResponseEncodeDecodeTest, Basic) { // auth response { std::vector resps; @@ -88,7 +88,7 @@ TEST(ResponseEncodDecodeTest, Basic) { } } -TEST(ResponseEncodDecodeTest, ToJson) { +TEST(ResponseEncodeDecodeTest, ToJson) { // plan description { std::vector pds; diff --git a/src/common/meta/GflagsManager.cpp b/src/common/meta/GflagsManager.cpp index 56731c28e57..a7dd8289981 100644 --- a/src/common/meta/GflagsManager.cpp +++ b/src/common/meta/GflagsManager.cpp @@ -54,7 +54,7 @@ std::unordered_map> GflagsManager {"v", {cpp2::ConfigMode::MUTABLE, false}}, {"heartbeat_interval_secs", {cpp2::ConfigMode::MUTABLE, false}}, 
{"meta_client_retry_times", {cpp2::ConfigMode::MUTABLE, false}}, - {"slow_op_threshhold_ms", {cpp2::ConfigMode::MUTABLE, false}}, + {"slow_op_threshold_ms", {cpp2::ConfigMode::MUTABLE, false}}, {"wal_ttl", {cpp2::ConfigMode::MUTABLE, false}}, {"clean_wal_interval_secs", {cpp2::ConfigMode::MUTABLE, false}}, {"custom_filter_interval_secs", {cpp2::ConfigMode::MUTABLE, false}}, diff --git a/src/common/network/NetworkUtils.cpp b/src/common/network/NetworkUtils.cpp index 14d3323610e..b0571c0e240 100644 --- a/src/common/network/NetworkUtils.cpp +++ b/src/common/network/NetworkUtils.cpp @@ -88,7 +88,7 @@ bool NetworkUtils::getDynamicPortRange(uint16_t& low, uint16_t& high) { if (fscanf(pipe, "%hu %hu", &low, &high) != 2) { LOG(ERROR) << "Failed to read from /proc/sys/net/ipv4/ip_local_port_range"; - // According to ICANN, the port range is devided into three sections + // According to ICANN, the port range is divided into three sections // // Well-known ports: 0 to 1023 (used for system services) // Registered/user ports: 1024 to 49151 @@ -171,7 +171,7 @@ uint16_t NetworkUtils::getAvailablePort() { uint16_t port = 0; while (true) { // NOTE - // The availablity of port number *outside* the ephemeral port range is + // The availability of port number *outside* the ephemeral port range is // relatively stable for the binding purpose. port = folly::Random::rand32(1025, low); if (portsInUse.find(port) != portsInUse.end()) { diff --git a/src/common/network/NetworkUtils.h b/src/common/network/NetworkUtils.h index 8062397c898..be95b1580e8 100644 --- a/src/common/network/NetworkUtils.h +++ b/src/common/network/NetworkUtils.h @@ -24,7 +24,7 @@ class NetworkUtils final { static StatusOr getIPv4FromDevice(const std::string& device); // List out all Ipv4 addresses, including the loopback one. static StatusOr> listIPv4s(); - // List out all network devices and its cooresponding Ipv4 address. + // List out all network devices and its corresponding Ipv4 address. 
static StatusOr> listDeviceAndIPv4s(); // Get the local dynamic port range [low, high], only works for IPv4 diff --git a/src/common/network/test/NetworkUtilsTest.cpp b/src/common/network/test/NetworkUtilsTest.cpp index 3092c255909..21d38e27101 100644 --- a/src/common/network/test/NetworkUtilsTest.cpp +++ b/src/common/network/test/NetworkUtilsTest.cpp @@ -34,7 +34,7 @@ TEST(NetworkUtils, getIPv4FromDevice) { ASSERT_EQ("0.0.0.0", result.value()); } { - auto result = NetworkUtils::getIPv4FromDevice("non-existence"); + auto result = NetworkUtils::getIPv4FromDevice("nonexistent"); ASSERT_FALSE(result.ok()) << result.status(); } } diff --git a/src/common/plugin/fulltext/test/FulltextPluginTest.cpp b/src/common/plugin/fulltext/test/FulltextPluginTest.cpp index ce8f85c1ce8..c79550da633 100644 --- a/src/common/plugin/fulltext/test/FulltextPluginTest.cpp +++ b/src/common/plugin/fulltext/test/FulltextPluginTest.cpp @@ -100,18 +100,18 @@ TEST(FulltextPluginTest, ESBulkTest) { "charset=utf-8\" -XPOST \"http://127.0.0.1:9200/_bulk\""; ASSERT_EQ(expected, header); - std::vector bodys; + std::vector bodies; for (const auto& item : items) { folly::dynamic meta = folly::dynamic::object("_id", DocIDTraits::docId(item))("_index", item.index); folly::dynamic data = folly::dynamic::object("value", DocIDTraits::val(item.val))( "column_id", DocIDTraits::column(item.column)); - bodys.emplace_back(folly::dynamic::object("index", std::move(meta))); - bodys.emplace_back(std::move(data)); + bodies.emplace_back(folly::dynamic::object("index", std::move(meta))); + bodies.emplace_back(std::move(data)); } auto body = ESStorageAdapter().bulkBody(items); - verifyBodyStr(body, std::move(bodys)); + verifyBodyStr(body, std::move(bodies)); } TEST(FulltextPluginTest, ESPutToTest) { @@ -216,13 +216,13 @@ TEST(FulltextPluginTest, ESResultTest) { // "root_cause": [ // { // "type": "parsing_exception", - // "reason": "Unknown key for a VALUE_STRING in [_soure].", + // "reason": "Unknown key for a 
VALUE_STRING in [_source].", // "line": 1, // "col": 128 // } // ], // "type": "parsing_exception", - // "reason": "Unknown key for a VALUE_STRING in [_soure].", + // "reason": "Unknown key for a VALUE_STRING in [_source].", // "line": 1, // "col": 128 // }, @@ -231,9 +231,9 @@ TEST(FulltextPluginTest, ESResultTest) { { std::string json = R"({"error": {"root_cause": [{"type": "parsing_exception","reason": - "Unknown key for a VALUE_STRING in [_soure].","line": 1,"col": 128}], + "Unknown key for a VALUE_STRING in [_source].","line": 1,"col": 128}], "type": "parsing_exception","reason": "Unknown key for a VALUE_STRING - in [_soure].","line": 1,"col": 128},"status": 400})"; + in [_source].","line": 1,"col": 128},"status": 400})"; HostAddr localHost_{"127.0.0.1", 9200}; HttpClient hc(localHost_); std::vector rows; diff --git a/src/common/process/ProcessUtils.cpp b/src/common/process/ProcessUtils.cpp index 0b73c306e8c..d33851131fa 100644 --- a/src/common/process/ProcessUtils.cpp +++ b/src/common/process/ProcessUtils.cpp @@ -26,7 +26,7 @@ Status ProcessUtils::isPidAvailable(pid_t pid) { return Status::Error("Process `%d' already existed but denied to access", pid); } if (errno != ESRCH) { - return Status::Error("Uknown error: `%s'", ::strerror(errno)); + return Status::Error("Unknown error: `%s'", ::strerror(errno)); } return Status::OK(); } diff --git a/src/common/stats/StatsManager.cpp b/src/common/stats/StatsManager.cpp index dc6a604c944..083b7c890db 100644 --- a/src/common/stats/StatsManager.cpp +++ b/src/common/stats/StatsManager.cpp @@ -163,12 +163,12 @@ void StatsManager::addValue(const CounterId& id, VT value) { // static bool StatsManager::strToPct(folly::StringPiece part, double& pct) { - static const int32_t dividors[] = {1, 1, 10, 100, 1000, 10000}; + static const int32_t divisors[] = {1, 1, 10, 100, 1000, 10000}; try { size_t len = part.size() - 1; if (len > 0 && len <= 6) { auto digits = folly::StringPiece(&(part[1]), len); - pct = folly::to(digits) / 
dividors[len - 1]; + pct = folly::to(digits) / divisors[len - 1]; return true; } else { LOG(ERROR) << "Precision " << part.toString() << " is too long"; diff --git a/src/common/stats/StatsManager.h b/src/common/stats/StatsManager.h index 5c4fe4960f6..3a1524c088a 100644 --- a/src/common/stats/StatsManager.h +++ b/src/common/stats/StatsManager.h @@ -61,7 +61,7 @@ class CounterId final { * This is a utility class to keep track the service's statistic information. * * It contains a bunch of counters. Each counter has a unique name and three - * levels of time ranges, which are 1 minute, 10 minues, and 1 hour. Each + * levels of time ranges, which are 1 minute, 10 minutes, and 1 hour. Each * counter also associates with one or multiple statistic types. The supported * statistic types are SUM, COUNT, AVG, RATE, MIN, MAX and percentiles. Among * those, MIN, MAX and Percentile are only supported for Histogram counters. diff --git a/src/common/thread/GenericThreadPool.h b/src/common/thread/GenericThreadPool.h index 63e8e6a07dd..fc2bdd87ccf 100644 --- a/src/common/thread/GenericThreadPool.h +++ b/src/common/thread/GenericThreadPool.h @@ -44,7 +44,7 @@ class GenericThreadPool final : public nebula::cpp::NonCopyable, public nebula:: bool start(size_t nrThreads, const std::string &name = ""); /** - * Asynchronouly to notify the workers to stop handling further new tasks. + * Asynchronously to notify the workers to stop handling further new tasks. 
*/ bool stop(); diff --git a/src/common/thread/GenericWorker.cpp b/src/common/thread/GenericWorker.cpp index c21c442c8db..b80076450fb 100644 --- a/src/common/thread/GenericWorker.cpp +++ b/src/common/thread/GenericWorker.cpp @@ -142,10 +142,10 @@ void GenericWorker::onNotify() { } } { - decltype(purgingingTimers_) newcomings; + decltype(purgingTimers_) newcomings; { std::lock_guard guard(lock_); - newcomings.swap(purgingingTimers_); + newcomings.swap(purgingTimers_); } for (auto id : newcomings) { purgeTimerInternal(id); @@ -164,7 +164,7 @@ GenericWorker::Timer::~Timer() { void GenericWorker::purgeTimerTask(uint64_t id) { { std::lock_guard guard(lock_); - purgingingTimers_.emplace_back(id); + purgingTimers_.emplace_back(id); } notify(); } diff --git a/src/common/thread/GenericWorker.h b/src/common/thread/GenericWorker.h index fe07592bb1f..80999197b12 100644 --- a/src/common/thread/GenericWorker.h +++ b/src/common/thread/GenericWorker.h @@ -39,7 +39,7 @@ class GenericWorker final : public nebula::cpp::NonCopyable, public nebula::cpp: ~GenericWorker(); /** - * To allocate resouces and launch the internal thread which executes + * To allocate resources and launch the internal thread which executes * the event loop to make this worker usable. * * Optionally, you could give the internal thread a specific name, @@ -54,7 +54,7 @@ class GenericWorker final : public nebula::cpp::NonCopyable, public nebula::cpp: bool NG_MUST_USE_RESULT start(std::string name = ""); /** - * Asynchronouly to notify the worker to stop handling further new tasks. + * Asynchronously to notify the worker to stop handling further new tasks. 
*/ bool stop(); @@ -160,7 +160,7 @@ class GenericWorker final : public nebula::cpp::NonCopyable, public nebula::cpp: std::vector> pendingTasks_; using TimerPtr = std::unique_ptr; std::vector pendingTimers_; - std::vector purgingingTimers_; + std::vector purgingTimers_; std::unordered_map activeTimers_; std::unique_ptr thread_; }; diff --git a/src/common/time/TimeConversion.h b/src/common/time/TimeConversion.h index a5950e1bafc..aeaaab5de01 100644 --- a/src/common/time/TimeConversion.h +++ b/src/common/time/TimeConversion.h @@ -109,7 +109,7 @@ class TimeConversion { // https://en.cppreference.com/w/cpp/language/operator_arithmetic). So make // sure the result is what we expected, if right shift not filled highest bit // by the sign bit that the process will falls back to procedure which fill - // hightest bit by the sign bit value. + // highest bit by the sign bit value. static int64_t shr(int64_t a, int b) { int64_t one = 1; return (-one >> 1 == -1 ? a >> b : (a + (a < 0)) / (one << b) - (a < 0)); diff --git a/src/common/time/TimeUtils.cpp b/src/common/time/TimeUtils.cpp index bcc6773032c..e9e65fb6361 100644 --- a/src/common/time/TimeUtils.cpp +++ b/src/common/time/TimeUtils.cpp @@ -65,7 +65,7 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits::max() / 10000000 } dt.microsec += kv.second.getInt(); } else { - return Status::Error("Invlaid parameter `%s'.", kv.first.c_str()); + return Status::Error("Invalid parameter `%s'.", kv.first.c_str()); } } auto result = validateDate(dt); @@ -98,7 +98,7 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits::max() / 10000000 } d.day = kv.second.getInt(); } else { - return Status::Error("Invlaid parameter `%s'.", kv.first.c_str()); + return Status::Error("Invalid parameter `%s'.", kv.first.c_str()); } } auto result = validateDate(d); @@ -140,7 +140,7 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits::max() / 10000000 } t.microsec += kv.second.getInt(); } else { - return Status::Error("Invlaid parameter `%s'.", 
kv.first.c_str()); + return Status::Error("Invalid parameter `%s'.", kv.first.c_str()); } } return t; diff --git a/src/common/time/TimeUtils.h b/src/common/time/TimeUtils.h index 12235e0851e..2c409817391 100644 --- a/src/common/time/TimeUtils.h +++ b/src/common/time/TimeUtils.h @@ -25,7 +25,7 @@ namespace nebula { namespace time { -// In nebula only store UTC time, and the interpretion of time value based on +// In nebula only store UTC time, and the interpretation of time value based on // the timezone configuration in current system. class TimeUtils { diff --git a/src/common/time/test/TimeUtilsTest.cpp b/src/common/time/test/TimeUtilsTest.cpp index 60c2d53e44b..d0cc1edf7be 100644 --- a/src/common/time/test/TimeUtilsTest.cpp +++ b/src/common/time/test/TimeUtilsTest.cpp @@ -11,7 +11,7 @@ namespace nebula { -TEST(Time, secondsTimeCovertion) { +TEST(Time, secondsTimeConversion) { // DateTime { std::vector values; diff --git a/src/common/utils/MemoryLockWrapper.h b/src/common/utils/MemoryLockWrapper.h index 8e5f73ecca4..7fff4f6bf32 100644 --- a/src/common/utils/MemoryLockWrapper.h +++ b/src/common/utils/MemoryLockWrapper.h @@ -68,7 +68,7 @@ class MemoryLockGuard { } // this will manual set the lock to unlocked state - // which mean will not release all locks automaticly + // which mean will not release all locks automatically // please make sure you really know the side effect void forceLock() { locked_ = true; } diff --git a/src/common/utils/MetaKeyUtils.cpp b/src/common/utils/MetaKeyUtils.cpp index 92dfa0ea400..3018051273d 100644 --- a/src/common/utils/MetaKeyUtils.cpp +++ b/src/common/utils/MetaKeyUtils.cpp @@ -27,7 +27,7 @@ static const std::unordered_map> syste {"ft_service", {"__ft_service__", false}}, {"sessions", {"__sessions__", true}}}; -// SystemInfo will always be backuped +// SystemInfo will always be backed up static const std::unordered_map> systemInfoMaps{ {"autoIncrementId", {"__id__", true}}, {"lastUpdateTime", {"__last_update_time__", true}}}; @@ 
-206,7 +206,7 @@ std::vector MetaKeyUtils::parsePartVal(folly::StringPiece val, int par return parsePartValV2(val); } -// partion val is ip(int) + port(int) +// partition val is ip(int) + port(int) std::vector MetaKeyUtils::parsePartValV1(folly::StringPiece val) { std::vector hosts; static const size_t unitSize = sizeof(int32_t) * 2; @@ -889,13 +889,13 @@ std::string MetaKeyUtils::balanceTaskKey( } std::string MetaKeyUtils::balanceTaskVal(BalanceTaskStatus status, - BalanceTaskResult retult, + BalanceTaskResult result, int64_t startTime, int64_t endTime) { std::string val; val.reserve(32); val.append(reinterpret_cast(&status), sizeof(BalanceTaskStatus)) - .append(reinterpret_cast(&retult), sizeof(BalanceTaskResult)) + .append(reinterpret_cast(&result), sizeof(BalanceTaskResult)) .append(reinterpret_cast(&startTime), sizeof(int64_t)) .append(reinterpret_cast(&endTime), sizeof(int64_t)); return val; diff --git a/src/common/utils/MetaKeyUtils.h b/src/common/utils/MetaKeyUtils.h index 49e5b794df1..e01eb2d67fc 100644 --- a/src/common/utils/MetaKeyUtils.h +++ b/src/common/utils/MetaKeyUtils.h @@ -268,7 +268,7 @@ class MetaKeyUtils final { JobID jobId, GraphSpaceID spaceId, PartitionID partId, HostAddr src, HostAddr dst); static std::string balanceTaskVal(BalanceTaskStatus status, - BalanceTaskResult retult, + BalanceTaskResult result, int64_t startTime, int64_t endTime); diff --git a/src/common/utils/NebulaKeyUtils.h b/src/common/utils/NebulaKeyUtils.h index f616e12b0d2..0d0ba17ea24 100644 --- a/src/common/utils/NebulaKeyUtils.h +++ b/src/common/utils/NebulaKeyUtils.h @@ -230,7 +230,7 @@ class NebulaKeyUtils final { } static folly::StringPiece keyWithNoVersion(const folly::StringPiece& rawKey) { - // TODO(heng) We should change the method if varint data version supportted. + // TODO(heng) We should change the method if varint data version supported. 
return rawKey.subpiece(0, rawKey.size() - sizeof(EdgeVerPlaceHolder)); } @@ -252,7 +252,7 @@ class NebulaKeyUtils final { static folly::StringPiece lockWithNoVersion(const folly::StringPiece& rawKey) { // TODO(liuyu) We should change the method if varint data version - // supportted. + // supported. return rawKey.subpiece(0, rawKey.size() - 1); } diff --git a/src/daemons/MetaDaemon.cpp b/src/daemons/MetaDaemon.cpp index 2a95ded57f9..e1df6369c29 100644 --- a/src/daemons/MetaDaemon.cpp +++ b/src/daemons/MetaDaemon.cpp @@ -328,7 +328,7 @@ int main(int argc, char* argv[]) { } auto handler = std::make_shared(gKVStore.get(), gClusterId); - LOG(INFO) << "The meta deamon start on " << localhost; + LOG(INFO) << "The meta daemon start on " << localhost; try { gServer = std::make_unique(); gServer->setPort(FLAGS_port); diff --git a/src/graph/context/ExecutionContext.cpp b/src/graph/context/ExecutionContext.cpp index b1fcc17fb49..dbb064c01af 100644 --- a/src/graph/context/ExecutionContext.cpp +++ b/src/graph/context/ExecutionContext.cpp @@ -30,7 +30,7 @@ size_t ExecutionContext::numVersions(const std::string& name) const { return it->second.size(); } -// Only keep the last several versoins of the Value +// Only keep the last several versions of the Value void ExecutionContext::truncHistory(const std::string& name, size_t numVersionsToKeep) { auto it = valueMap_.find(name); if (it != valueMap_.end()) { diff --git a/src/graph/context/ExecutionContext.h b/src/graph/context/ExecutionContext.h index 3cb4af51a95..ec6c917a557 100644 --- a/src/graph/context/ExecutionContext.h +++ b/src/graph/context/ExecutionContext.h @@ -61,7 +61,7 @@ class ExecutionContext { void dropResult(const std::string& name); - // Only keep the last several versoins of the Value + // Only keep the last several versions of the Value void truncHistory(const std::string& name, size_t numVersionsToKeep); bool exist(const std::string& name) const { return valueMap_.find(name) != valueMap_.end(); } diff --git 
a/src/graph/context/ValidateContext.h b/src/graph/context/ValidateContext.h index df59eb1fc69..d3904e91792 100644 --- a/src/graph/context/ValidateContext.h +++ b/src/graph/context/ValidateContext.h @@ -68,9 +68,9 @@ class ValidateContext final { return find->second; } - void addIndex(const std::string& indexName) { indexs_.emplace(indexName); } + void addIndex(const std::string& indexName) { indexes_.emplace(indexName); } - bool hasIndex(const std::string& indexName) { return indexs_.find(indexName) != indexs_.end(); } + bool hasIndex(const std::string& indexName) { return indexes_.find(indexName) != indexes_.end(); } private: // spaces_ is the trace of space switch @@ -83,7 +83,7 @@ class ValidateContext final { std::unordered_map>; Schemas schemas_; std::unordered_set createSpaces_; - std::unordered_set indexs_; + std::unordered_set indexes_; }; } // namespace graph } // namespace nebula diff --git a/src/graph/executor/admin/BalanceLeadersExecutor.h b/src/graph/executor/admin/BalanceLeadersExecutor.h new file mode 100644 index 00000000000..604d49bc444 --- /dev/null +++ b/src/graph/executor/admin/BalanceLeadersExecutor.h @@ -0,0 +1,29 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ +#define GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ + +#include "graph/context/QueryContext.h" +#include "graph/executor/Executor.h" + +namespace nebula { +namespace graph { + +class BalanceLeadersExecutor final : public Executor { + public: + BalanceLeadersExecutor(const PlanNode *node, QueryContext *qctx) + : Executor("BalanceLeadersExecutor", node, qctx) {} + + folly::Future execute() override; + + private: + folly::Future balanceLeaders(); +}; + +} // namespace graph +} // namespace nebula + +#endif // GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ diff --git a/src/graph/executor/admin/KillQueryExecutor.cpp b/src/graph/executor/admin/KillQueryExecutor.cpp index d3e956a95c7..e892d015788 100644 --- a/src/graph/executor/admin/KillQueryExecutor.cpp +++ b/src/graph/executor/admin/KillQueryExecutor.cpp @@ -13,7 +13,7 @@ namespace graph { folly::Future KillQueryExecutor::execute() { SCOPED_TIMER(&execTime_); - // TODO: permision check + // TODO: permission check QueriesMap toBeVerifiedQueries; QueriesMap killQueries; diff --git a/src/graph/executor/admin/ShowHostsExecutor.cpp b/src/graph/executor/admin/ShowHostsExecutor.cpp index 2ff7b5a39b0..84d4a85bb88 100644 --- a/src/graph/executor/admin/ShowHostsExecutor.cpp +++ b/src/graph/executor/admin/ShowHostsExecutor.cpp @@ -21,7 +21,7 @@ folly::Future ShowHostsExecutor::execute() { folly::Future ShowHostsExecutor::showHosts() { static constexpr char kNoPartition[] = "No valid partition"; - static constexpr char kPartitionDelimeter[] = ", "; + static constexpr char kPartitionDelimiter[] = ", "; auto *shNode = asNode(node()); auto makeTraditionalResult = [&](const std::vector &hostVec) -> DataSet { @@ -56,7 +56,7 @@ folly::Future ShowHostsExecutor::showHosts() { leaderPartsCount[l.first] += l.second.size(); leaders << l.first << ":" << l.second.size(); if (i < lPartsCount.size() - 1) { - leaders << kPartitionDelimeter; + leaders << kPartitionDelimiter; } ++i; 
} @@ -71,7 +71,7 @@ folly::Future ShowHostsExecutor::showHosts() { allPartsCount[p.first] += p.second.size(); parts << p.first << ":" << p.second.size(); if (i < aPartsCount.size() - 1) { - parts << kPartitionDelimeter; + parts << kPartitionDelimiter; } ++i; } @@ -95,7 +95,7 @@ folly::Future ShowHostsExecutor::showHosts() { for (const auto &spaceEntry : leaderPartsCount) { leaders << spaceEntry.first << ":" << spaceEntry.second; if (i < leaderPartsCount.size() - 1) { - leaders << kPartitionDelimeter; + leaders << kPartitionDelimiter; } ++i; } @@ -103,7 +103,7 @@ folly::Future ShowHostsExecutor::showHosts() { for (const auto &spaceEntry : allPartsCount) { parts << spaceEntry.first << ":" << spaceEntry.second; if (i < allPartsCount.size() - 1) { - parts << kPartitionDelimeter; + parts << kPartitionDelimiter; } ++i; } diff --git a/src/graph/executor/admin/ShowStatsExecutor.cpp b/src/graph/executor/admin/ShowStatsExecutor.cpp index bef1b97eece..aaaccbc2799 100644 --- a/src/graph/executor/admin/ShowStatsExecutor.cpp +++ b/src/graph/executor/admin/ShowStatsExecutor.cpp @@ -21,7 +21,7 @@ folly::Future ShowStatsExecutor::execute() { return qctx()->getMetaClient()->getStats(spaceId).via(runner()).thenValue( [this, spaceId](StatusOr resp) { if (!resp.ok()) { - LOG(ERROR) << "SpaceId: " << spaceId << ", Show staus failed: " << resp.status(); + LOG(ERROR) << "SpaceId: " << spaceId << ", Show status failed: " << resp.status(); return resp.status(); } auto statsItem = std::move(resp).value(); diff --git a/src/graph/executor/admin/SnapshotExecutor.cpp b/src/graph/executor/admin/SnapshotExecutor.cpp index 1b6375711cd..43f0d6869b3 100644 --- a/src/graph/executor/admin/SnapshotExecutor.cpp +++ b/src/graph/executor/admin/SnapshotExecutor.cpp @@ -32,7 +32,7 @@ folly::Future DropSnapshotExecutor::execute() { auto *dsNode = asNode(node()); return qctx() ->getMetaClient() - ->dropSnapshot(dsNode->getShapshotName()) + ->dropSnapshot(dsNode->getSnapshotName()) .via(runner()) 
.thenValue([](StatusOr resp) { if (!resp.ok()) { diff --git a/src/graph/executor/algo/ProduceAllPathsExecutor.cpp b/src/graph/executor/algo/ProduceAllPathsExecutor.cpp index 26fc027ec16..b66a4d6600f 100644 --- a/src/graph/executor/algo/ProduceAllPathsExecutor.cpp +++ b/src/graph/executor/algo/ProduceAllPathsExecutor.cpp @@ -20,7 +20,7 @@ folly::Future ProduceAllPathsExecutor::execute() { Interims interims; if (!iter->isGetNeighborsIter()) { - return Status::Error("Only accept GetNeighbotsIter."); + return Status::Error("Only accept GetNeighborsIter."); } VLOG(1) << "Edge size: " << iter->size(); for (; iter->valid(); iter->next()) { diff --git a/src/graph/executor/algo/ProduceSemiShortestPathExecutor.cpp b/src/graph/executor/algo/ProduceSemiShortestPathExecutor.cpp index 79e68ae4e97..067b5ca1935 100644 --- a/src/graph/executor/algo/ProduceSemiShortestPathExecutor.cpp +++ b/src/graph/executor/algo/ProduceSemiShortestPathExecutor.cpp @@ -214,7 +214,7 @@ folly::Future ProduceSemiShortestPathExecutor::execute() { CostPaths costPaths(weight, {std::move(path)}); currentCostPathMap[dst].emplace(src, std::move(costPaths)); } else { - // same (src, dst), diffrent edge type or rank + // same (src, dst), different edge type or rank auto currentCost = currentCostPathMap[dst][src].cost_; if (weight == currentCost) { currentCostPathMap[dst][src].paths_.emplace_back(std::move(path)); diff --git a/src/graph/executor/query/UnionAllVersionVarExecutor.cpp b/src/graph/executor/query/UnionAllVersionVarExecutor.cpp index 59cda3eb2aa..856ba363bd0 100644 --- a/src/graph/executor/query/UnionAllVersionVarExecutor.cpp +++ b/src/graph/executor/query/UnionAllVersionVarExecutor.cpp @@ -14,7 +14,7 @@ namespace graph { folly::Future UnionAllVersionVarExecutor::execute() { SCOPED_TIMER(&execTime_); auto* UnionAllVersionVarNode = asNode(node()); - // Retrive all versions of inputVar + // Retrieve all versions of inputVar auto& results = ectx_->getHistory(UnionAllVersionVarNode->inputVar()); 
DCHECK_GT(results.size(), 0); // List of iterators to be unioned diff --git a/src/graph/executor/test/CartesianProductTest.cpp b/src/graph/executor/test/CartesianProductTest.cpp index c09b239e8f3..1e73d47ef95 100644 --- a/src/graph/executor/test/CartesianProductTest.cpp +++ b/src/graph/executor/test/CartesianProductTest.cpp @@ -106,7 +106,7 @@ TEST_F(CartesianProductTest, twoVars) { checkResult(expected, cp->outputVar()); } -TEST_F(CartesianProductTest, thressVars) { +TEST_F(CartesianProductTest, threeVars) { auto* cp = CartesianProduct::make(qctx_.get(), nullptr); cp->addVar("ds1"); cp->addVar("ds2"); diff --git a/src/graph/executor/test/DedupTest.cpp b/src/graph/executor/test/DedupTest.cpp index 0b75ddd82f4..3692c49933a 100644 --- a/src/graph/executor/test/DedupTest.cpp +++ b/src/graph/executor/test/DedupTest.cpp @@ -18,7 +18,7 @@ class DedupTest : public QueryTestBase { void SetUp() override { QueryTestBase::SetUp(); } }; -#define DEDUP_RESUTL_CHECK(inputName, outputName, sentence, expected) \ +#define DEDUP_RESULT_CHECK(inputName, outputName, sentence, expected) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto yieldSentence = getYieldSentence(sentence, qctx_.get()); \ @@ -43,10 +43,10 @@ class DedupTest : public QueryTestBase { \ auto proExe = std::make_unique(project, qctx_.get()); \ EXPECT_TRUE(proExe->execute().get().ok()); \ - auto& proSesult = qctx_->ectx()->getResult(project->outputVar()); \ + auto& proResult = qctx_->ectx()->getResult(project->outputVar()); \ \ - EXPECT_EQ(proSesult.value().getDataSet(), expected); \ - EXPECT_EQ(proSesult.state(), Result::State::kSuccess); \ + EXPECT_EQ(proResult.value().getDataSet(), expected); \ + EXPECT_EQ(proResult.state(), Result::State::kSuccess); \ } while (false) TEST_F(DedupTest, TestSequential) { @@ -60,17 +60,17 @@ TEST_F(DedupTest, TestSequential) { auto sentence = "YIELD DISTINCT $-.vid as vid, $-.v_name as name, $-.v_age as age, " "$-.v_dst as dst, $-.e_start_year as start, $-.e_end_year as end"; 
- DEDUP_RESUTL_CHECK("input_sequential", "dedup_sequential", sentence, expected); + DEDUP_RESULT_CHECK("input_sequential", "dedup_sequential", sentence, expected); } TEST_F(DedupTest, TestEmpty) { DataSet expected({"name"}); - DEDUP_RESUTL_CHECK("empty", "dedup_sequential", "YIELD DISTINCT $-.v_dst as name", expected); + DEDUP_RESULT_CHECK("empty", "dedup_sequential", "YIELD DISTINCT $-.v_dst as name", expected); } TEST_F(DedupTest, WrongTypeIterator) { DataSet expected; - DEDUP_RESUTL_CHECK( + DEDUP_RESULT_CHECK( "input_neighbor", "dedup_sequential", "YIELD DISTINCT $-.v_dst as name", expected); } } // namespace graph diff --git a/src/graph/executor/test/FilterTest.cpp b/src/graph/executor/test/FilterTest.cpp index dcbb5fec98a..883e331ef0c 100644 --- a/src/graph/executor/test/FilterTest.cpp +++ b/src/graph/executor/test/FilterTest.cpp @@ -19,7 +19,7 @@ class FilterTest : public QueryTestBase { void SetUp() override { QueryTestBase::SetUp(); } }; -#define FILTER_RESUTL_CHECK(inputName, outputName, sentence, expected) \ +#define FILTER_RESULT_CHECK(inputName, outputName, sentence, expected) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto yieldSentence = getYieldSentence(sentence, qctx_.get()); \ @@ -44,10 +44,10 @@ class FilterTest : public QueryTestBase { \ auto proExe = std::make_unique(project, qctx_.get()); \ EXPECT_TRUE(proExe->execute().get().ok()); \ - auto& proSesult = qctx_->ectx()->getResult(project->outputVar()); \ + auto& proResult = qctx_->ectx()->getResult(project->outputVar()); \ \ - EXPECT_EQ(proSesult.value().getDataSet(), expected); \ - EXPECT_EQ(proSesult.state(), Result::State::kSuccess); \ + EXPECT_EQ(proResult.value().getDataSet(), expected); \ + EXPECT_EQ(proResult.state(), Result::State::kSuccess); \ } while (false) TEST_F(FilterTest, TestGetNeighbors_src_dst) { @@ -55,7 +55,7 @@ TEST_F(FilterTest, TestGetNeighbors_src_dst) { expected.emplace_back(Row({Value("Ann")})); expected.emplace_back(Row({Value("Ann")})); 
expected.emplace_back(Row({Value("Tom")})); - FILTER_RESUTL_CHECK("input_neighbor", + FILTER_RESULT_CHECK("input_neighbor", "filter_getNeighbor", "YIELD $^.person.name AS name WHERE study.start_year >= 2010", expected); @@ -65,7 +65,7 @@ TEST_F(FilterTest, TestSequential) { DataSet expected({"name"}); expected.emplace_back(Row({Value("Ann")})); expected.emplace_back(Row({Value("Ann")})); - FILTER_RESUTL_CHECK("input_sequential", + FILTER_RESULT_CHECK("input_sequential", "filter_sequential", "YIELD $-.v_name AS name WHERE $-.e_start_year >= 2010", expected); @@ -73,13 +73,13 @@ TEST_F(FilterTest, TestSequential) { TEST_F(FilterTest, TestNullValue) { DataSet expected({"name"}); - FILTER_RESUTL_CHECK( + FILTER_RESULT_CHECK( "input_sequential", "filter_sequential", "YIELD $-.v_name AS name WHERE NULL", expected); } TEST_F(FilterTest, TestEmpty) { DataSet expected({"name"}); - FILTER_RESUTL_CHECK("empty", + FILTER_RESULT_CHECK("empty", "filter_empty", "YIELD $^.person.name AS name WHERE study.start_year >= 2010", expected); diff --git a/src/graph/executor/test/LimitTest.cpp b/src/graph/executor/test/LimitTest.cpp index 06305fa7ec8..3117d0fa826 100644 --- a/src/graph/executor/test/LimitTest.cpp +++ b/src/graph/executor/test/LimitTest.cpp @@ -17,7 +17,7 @@ namespace nebula { namespace graph { class LimitTest : public QueryTestBase {}; -#define LIMIT_RESUTL_CHECK(outputName, offset, count, expected) \ +#define LIMIT_RESULT_CHECK(outputName, offset, count, expected) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto start = StartNode::make(qctx_.get()); \ @@ -45,7 +45,7 @@ TEST_F(LimitTest, SequentialInRange1) { DataSet expected({"name", "start"}); expected.emplace_back(Row({Value("Joy"), Value(2009)})); expected.emplace_back(Row({Value("Tom"), Value(2008)})); - LIMIT_RESUTL_CHECK("limit_in_sequential1", 1, 2, expected); + LIMIT_RESULT_CHECK("limit_in_sequential1", 1, 2, expected); } TEST_F(LimitTest, SequentialInRange2) { @@ -54,7 +54,7 @@ TEST_F(LimitTest, 
SequentialInRange2) { expected.emplace_back(Row({Value("Joy"), Value(2009)})); expected.emplace_back(Row({Value("Tom"), Value(2008)})); expected.emplace_back(Row({Value("Kate"), Value(2009)})); - LIMIT_RESUTL_CHECK("limit_in_sequential2", 0, 4, expected); + LIMIT_RESULT_CHECK("limit_in_sequential2", 0, 4, expected); } TEST_F(LimitTest, SequentialOutRange1) { @@ -65,12 +65,12 @@ TEST_F(LimitTest, SequentialOutRange1) { expected.emplace_back(Row({Value("Kate"), Value(2009)})); expected.emplace_back(Row({Value("Ann"), Value(2010)})); expected.emplace_back(Row({Value("Lily"), Value(2009)})); - LIMIT_RESUTL_CHECK("limit_out_sequential1", 0, 7, expected); + LIMIT_RESULT_CHECK("limit_out_sequential1", 0, 7, expected); } TEST_F(LimitTest, getNeighborOutRange2) { DataSet expected({"name", "start"}); - LIMIT_RESUTL_CHECK("limit_out_sequential2", 6, 2, expected); + LIMIT_RESULT_CHECK("limit_out_sequential2", 6, 2, expected); } } // namespace graph } // namespace nebula diff --git a/src/graph/executor/test/ProduceAllPathsTest.cpp b/src/graph/executor/test/ProduceAllPathsTest.cpp index 06c9dfe6fd4..ebce7928286 100644 --- a/src/graph/executor/test/ProduceAllPathsTest.cpp +++ b/src/graph/executor/test/ProduceAllPathsTest.cpp @@ -198,7 +198,7 @@ class ProduceAllPathsTest : public testing::Test { ds3.rows.emplace_back(std::move(row)); } } - thridStepResult_ = std::move(ds3); + thirdStepResult_ = std::move(ds3); { DataSet ds; @@ -221,7 +221,7 @@ class ProduceAllPathsTest : public testing::Test { std::unique_ptr qctx_; DataSet firstStepResult_; DataSet secondStepResult_; - DataSet thridStepResult_; + DataSet thirdStepResult_; }; TEST_F(ProduceAllPathsTest, AllPath) { @@ -408,7 +408,7 @@ TEST_F(ProduceAllPathsTest, AllPath) { { ResultBuilder builder; List datasets; - datasets.values.emplace_back(std::move(thridStepResult_)); + datasets.values.emplace_back(std::move(thirdStepResult_)); builder.value(std::move(datasets)).iter(Iterator::Kind::kGetNeighbors); 
qctx_->ectx()->setResult("input", builder.build()); diff --git a/src/graph/executor/test/ProduceSemiShortestPathTest.cpp b/src/graph/executor/test/ProduceSemiShortestPathTest.cpp index 92f49b08d0b..963d7d2f453 100644 --- a/src/graph/executor/test/ProduceSemiShortestPathTest.cpp +++ b/src/graph/executor/test/ProduceSemiShortestPathTest.cpp @@ -192,7 +192,7 @@ class ProduceSemiShortestPathTest : public testing::Test { ds3.rows.emplace_back(std::move(row)); } } - thridStepResult_ = std::move(ds3); + thirdStepResult_ = std::move(ds3); { DataSet ds; @@ -215,7 +215,7 @@ class ProduceSemiShortestPathTest : public testing::Test { std::unique_ptr qctx_; DataSet firstStepResult_; DataSet secondStepResult_; - DataSet thridStepResult_; + DataSet thirdStepResult_; }; TEST_F(ProduceSemiShortestPathTest, ShortestPath) { @@ -441,7 +441,7 @@ TEST_F(ProduceSemiShortestPathTest, ShortestPath) { { ResultBuilder builder; List datasets; - datasets.values.emplace_back(std::move(thridStepResult_)); + datasets.values.emplace_back(std::move(thirdStepResult_)); builder.value(std::move(datasets)).iter(Iterator::Kind::kGetNeighbors); qctx_->ectx()->setResult("input", builder.build()); diff --git a/src/graph/executor/test/SampleTest.cpp b/src/graph/executor/test/SampleTest.cpp index e52f5ca9dca..d786b0f802f 100644 --- a/src/graph/executor/test/SampleTest.cpp +++ b/src/graph/executor/test/SampleTest.cpp @@ -17,7 +17,7 @@ namespace nebula { namespace graph { class SampleTest : public QueryTestBase {}; -#define SAMPLE_RESUTL_CHECK(outputName, count, expect) \ +#define SAMPLE_RESULT_CHECK(outputName, count, expect) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto start = StartNode::make(qctx_.get()); \ @@ -41,13 +41,13 @@ class SampleTest : public QueryTestBase {}; EXPECT_EQ(proResult.state(), Result::State::kSuccess); \ } while (false) -TEST_F(SampleTest, SequentialInRange2) { SAMPLE_RESUTL_CHECK("sample_in_sequential2", 4, 4); } +TEST_F(SampleTest, SequentialInRange2) { 
SAMPLE_RESULT_CHECK("sample_in_sequential2", 4, 4); } -TEST_F(SampleTest, SequentialOutRange1) { SAMPLE_RESUTL_CHECK("sample_out_sequential3", 7, 6); } +TEST_F(SampleTest, SequentialOutRange1) { SAMPLE_RESULT_CHECK("sample_out_sequential3", 7, 6); } -TEST_F(SampleTest, SequentialOutRange2) { SAMPLE_RESUTL_CHECK("sample_out_sequential4", 8, 6); } +TEST_F(SampleTest, SequentialOutRange2) { SAMPLE_RESULT_CHECK("sample_out_sequential4", 8, 6); } -#define SAMPLE_GN_RESUTL_CHECK(outputName, count, expect) \ +#define SAMPLE_GN_RESULT_CHECK(outputName, count, expect) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto start = StartNode::make(qctx_.get()); \ @@ -70,11 +70,11 @@ TEST_F(SampleTest, SequentialOutRange2) { SAMPLE_RESUTL_CHECK("sample_out_sequen EXPECT_EQ(proResult.state(), Result::State::kSuccess); \ } while (false) -TEST_F(SampleTest, GN2) { SAMPLE_GN_RESUTL_CHECK("sample_in_gn2", 4, 4); } +TEST_F(SampleTest, GN2) { SAMPLE_GN_RESULT_CHECK("sample_in_gn2", 4, 4); } -TEST_F(SampleTest, GNOutOfRange1) { SAMPLE_GN_RESUTL_CHECK("sample_out_gn1", 6, 4); } +TEST_F(SampleTest, GNOutOfRange1) { SAMPLE_GN_RESULT_CHECK("sample_out_gn1", 6, 4); } -TEST_F(SampleTest, GNOutOfRange2) { SAMPLE_GN_RESUTL_CHECK("sample_out_gn2", 7, 4); } +TEST_F(SampleTest, GNOutOfRange2) { SAMPLE_GN_RESULT_CHECK("sample_out_gn2", 7, 4); } } // namespace graph } // namespace nebula diff --git a/src/graph/executor/test/SetExecutorTest.cpp b/src/graph/executor/test/SetExecutorTest.cpp index aee6e891a24..9bac46afe25 100644 --- a/src/graph/executor/test/SetExecutorTest.cpp +++ b/src/graph/executor/test/SetExecutorTest.cpp @@ -157,7 +157,7 @@ TEST_F(SetExecutorTest, TestUnionAll) { } } -TEST_F(SetExecutorTest, TestGetNeighobrsIterator) { +TEST_F(SetExecutorTest, TestGetNeighborsIterator) { auto left = StartNode::make(qctx_.get()); auto right = StartNode::make(qctx_.get()); auto unionNode = Union::make(qctx_.get(), left, right); diff --git a/src/graph/executor/test/SortTest.cpp 
b/src/graph/executor/test/SortTest.cpp index ac2774d4470..f7223c158a7 100644 --- a/src/graph/executor/test/SortTest.cpp +++ b/src/graph/executor/test/SortTest.cpp @@ -17,7 +17,7 @@ namespace graph { class SortTest : public QueryTestBase {}; -#define SORT_RESUTL_CHECK(input_name, outputName, multi, factors, expected) \ +#define SORT_RESULT_CHECK(input_name, outputName, multi, factors, expected) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto start = StartNode::make(qctx_.get()); \ @@ -59,7 +59,7 @@ TEST_F(SortTest, sortOneColAsc) { expected.emplace_back(Row({Value::kNullValue})); std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); - SORT_RESUTL_CHECK("input_sequential", "sort_one_col_asc", false, factors, expected); + SORT_RESULT_CHECK("input_sequential", "sort_one_col_asc", false, factors, expected); } TEST_F(SortTest, sortOneColDes) { @@ -72,7 +72,7 @@ TEST_F(SortTest, sortOneColDes) { expected.emplace_back(Row({18})); std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); - SORT_RESUTL_CHECK("input_sequential", "sort_one_col_des", false, factors, expected); + SORT_RESULT_CHECK("input_sequential", "sort_one_col_des", false, factors, expected); } TEST_F(SortTest, sortTwoColsAscAsc) { @@ -86,7 +86,7 @@ TEST_F(SortTest, sortTwoColsAscAsc) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::ASCEND)); - SORT_RESUTL_CHECK("input_sequential", "sort_two_cols_asc_asc", true, factors, expected); + SORT_RESULT_CHECK("input_sequential", "sort_two_cols_asc_asc", true, factors, expected); } TEST_F(SortTest, sortTwoColsAscDes) { @@ -100,7 +100,7 @@ TEST_F(SortTest, sortTwoColsAscDes) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::DESCEND)); - 
SORT_RESUTL_CHECK("input_sequential", "sort_two_cols_asc_des", true, factors, expected); + SORT_RESULT_CHECK("input_sequential", "sort_two_cols_asc_des", true, factors, expected); } TEST_F(SortTest, sortTwoColDesDes) { @@ -114,7 +114,7 @@ TEST_F(SortTest, sortTwoColDesDes) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::DESCEND)); - SORT_RESUTL_CHECK("input_sequential", "sort_two_cols_des_des", true, factors, expected); + SORT_RESULT_CHECK("input_sequential", "sort_two_cols_des_des", true, factors, expected); } TEST_F(SortTest, sortTwoColDesDes_union) { @@ -128,7 +128,7 @@ TEST_F(SortTest, sortTwoColDesDes_union) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::DESCEND)); - SORT_RESUTL_CHECK("union_sequential", "union_sort_two_cols_des_des", true, factors, expected); + SORT_RESULT_CHECK("union_sequential", "union_sort_two_cols_des_des", true, factors, expected); } } // namespace graph } // namespace nebula diff --git a/src/graph/executor/test/TopNTest.cpp b/src/graph/executor/test/TopNTest.cpp index 24170b8c0d6..d5778a11132 100644 --- a/src/graph/executor/test/TopNTest.cpp +++ b/src/graph/executor/test/TopNTest.cpp @@ -17,7 +17,7 @@ namespace graph { class TopNTest : public QueryTestBase {}; -#define TOPN_RESUTL_CHECK(input_name, outputName, multi, factors, offset, count, expected) \ +#define TOPN_RESULT_CHECK(input_name, outputName, multi, factors, offset, count, expected) \ do { \ qctx_->symTable()->newVariable(outputName); \ auto start = StartNode::make(qctx_.get()); \ @@ -57,7 +57,7 @@ TEST_F(TopNTest, topnOneColAsc) { expected.emplace_back(Row({20})); std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_one_col_asc", false, factors, 0, 4, expected); + 
TOPN_RESULT_CHECK("input_sequential", "topn_one_col_asc", false, factors, 0, 4, expected); } TEST_F(TopNTest, topnOneColDes) { @@ -68,7 +68,7 @@ TEST_F(TopNTest, topnOneColDes) { expected.emplace_back(Row({18})); std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_one_col_des", false, factors, 2, 9, expected); + TOPN_RESULT_CHECK("input_sequential", "topn_one_col_des", false, factors, 2, 9, expected); } TEST_F(TopNTest, topnTwoColsAscAsc) { @@ -79,7 +79,7 @@ TEST_F(TopNTest, topnTwoColsAscAsc) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::ASCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_two_cols_asc_asc", true, factors, 2, 3, expected); + TOPN_RESULT_CHECK("input_sequential", "topn_two_cols_asc_asc", true, factors, 2, 3, expected); } TEST_F(TopNTest, topnTwoColsAscDes) { @@ -89,7 +89,7 @@ TEST_F(TopNTest, topnTwoColsAscDes) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::ASCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::DESCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_two_cols_asc_des", true, factors, 0, 2, expected); + TOPN_RESULT_CHECK("input_sequential", "topn_two_cols_asc_des", true, factors, 0, 2, expected); } TEST_F(TopNTest, topnTwoColDesDes) { @@ -97,7 +97,7 @@ TEST_F(TopNTest, topnTwoColDesDes) { std::vector> factors; factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::DESCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_two_cols_des_des", true, factors, 10, 5, expected); + TOPN_RESULT_CHECK("input_sequential", "topn_two_cols_des_des", true, factors, 10, 5, expected); } TEST_F(TopNTest, topnTwoColDesAsc) { @@ -110,7 +110,7 @@ TEST_F(TopNTest, topnTwoColDesAsc) { std::vector> factors; 
factors.emplace_back(std::make_pair(2, OrderFactor::OrderType::DESCEND)); factors.emplace_back(std::make_pair(4, OrderFactor::OrderType::ASCEND)); - TOPN_RESUTL_CHECK("input_sequential", "topn_two_cols_des_asc", true, factors, 1, 9, expected); + TOPN_RESULT_CHECK("input_sequential", "topn_two_cols_des_asc", true, factors, 1, 9, expected); } } // namespace graph } // namespace nebula diff --git a/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp b/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp index 13a01c52db2..3acad7ba487 100644 --- a/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp +++ b/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp @@ -122,7 +122,7 @@ StatusOr GeoPredicateIndexScanBaseRule::transform( auto scanNode = IndexScan::make(ctx->qctx(), nullptr); OptimizerUtils::copyIndexScanData(scan, scanNode); scanNode->setIndexQueryContext(std::move(idxCtxs)); - // TODO(jie): geo predicate's caculation is a little heavy, + // TODO(jie): geo predicate's calculation is a little heavy, // which is not suitable to push down to the storage scanNode->setOutputVar(filter->outputVar()); scanNode->setColNames(filter->colNames()); diff --git a/src/graph/optimizer/rule/IndexScanRule.cpp b/src/graph/optimizer/rule/IndexScanRule.cpp index 399a4987ee1..454e08e2d5a 100644 --- a/src/graph/optimizer/rule/IndexScanRule.cpp +++ b/src/graph/optimizer/rule/IndexScanRule.cpp @@ -241,7 +241,7 @@ Status IndexScanRule::appendColHint(std::vector& hints, begin = {item.value_, true}; break; } - // because only type for bool is true/false, which can not satisify [start, + // because only type for bool is true/false, which can not satisfy [start, // end) if (col.get_type().get_type() == nebula::cpp2::PropertyType::BOOL) { return Status::SemanticError("Range scan for bool type is illegal"); diff --git a/src/graph/planner/PlannersRegister.cpp b/src/graph/planner/PlannersRegister.cpp index 04bab266396..e682ad243b8 100644 --- 
a/src/graph/planner/PlannersRegister.cpp +++ b/src/graph/planner/PlannersRegister.cpp @@ -23,13 +23,13 @@ namespace nebula { namespace graph { -void PlannersRegister::registPlanners() { - registDDL(); - registSequential(); - registMatch(); +void PlannersRegister::registerPlanners() { + registerDDL(); + registerSequential(); + registerMatch(); } -void PlannersRegister::registDDL() { +void PlannersRegister::registerDDL() { { auto& planners = Planner::plannersMap()[Sentence::Kind::kAlterTag]; planners.emplace_back(&AlterTagPlanner::match, &AlterTagPlanner::make); @@ -48,7 +48,7 @@ void PlannersRegister::registDDL() { } } -void PlannersRegister::registSequential() { +void PlannersRegister::registerSequential() { { auto& planners = Planner::plannersMap()[Sentence::Kind::kSequential]; planners.emplace_back(&SequentialPlanner::match, &SequentialPlanner::make); @@ -79,7 +79,7 @@ void PlannersRegister::registSequential() { } } -void PlannersRegister::registMatch() { +void PlannersRegister::registerMatch() { auto& planners = Planner::plannersMap()[Sentence::Kind::kMatch]; planners.emplace_back(&MatchPlanner::match, &MatchPlanner::make); diff --git a/src/graph/planner/PlannersRegister.h b/src/graph/planner/PlannersRegister.h index 5577e6e1588..bdadb940756 100644 --- a/src/graph/planner/PlannersRegister.h +++ b/src/graph/planner/PlannersRegister.h @@ -14,12 +14,12 @@ class PlannersRegister final { PlannersRegister() = delete; ~PlannersRegister() = delete; - static void registPlanners(); + static void registerPlanners(); private: - static void registDDL(); - static void registSequential(); - static void registMatch(); + static void registerDDL(); + static void registerSequential(); + static void registerMatch(); }; } // namespace graph diff --git a/src/graph/planner/match/LabelIndexSeek.cpp b/src/graph/planner/match/LabelIndexSeek.cpp index ae9e6473be6..baf9e9266d1 100644 --- a/src/graph/planner/match/LabelIndexSeek.cpp +++ b/src/graph/planner/match/LabelIndexSeek.cpp @@ -17,7 
+17,7 @@ bool LabelIndexSeek::matchNode(NodeContext* nodeCtx) { // only require the tag if (node.tids.size() != 1) { // TODO multiple tag index seek need the IndexScan support - VLOG(2) << "Multple tag index seek is not supported now."; + VLOG(2) << "Multiple tag index seek is not supported now."; return false; } @@ -81,7 +81,7 @@ StatusOr LabelIndexSeek::transformNode(NodeContext* nodeCtx) { plan.tail = scan; plan.root = scan; - // This if-block is a patch for or-filter-embeding to avoid OOM, + // This if-block is a patch for or-filter-embedding to avoid OOM, // and it should be converted to an `optRule` after the match validator is // refactored auto& whereCtx = matchClauseCtx->where; @@ -104,19 +104,19 @@ StatusOr LabelIndexSeek::transformNode(NodeContext* nodeCtx) { auto flattenFilter = ExpressionUtils::flattenInnerLogicalExpr(filter); DCHECK_EQ(flattenFilter->kind(), Expression::Kind::kLogicalOr); auto& filterItems = static_cast(flattenFilter)->operands(); - auto canBeEmbeded = [](Expression::Kind k) -> bool { + auto canBeEmbedded = [](Expression::Kind k) -> bool { return k == Expression::Kind::kRelEQ || k == Expression::Kind::kRelLT || k == Expression::Kind::kRelLE || k == Expression::Kind::kRelGT || k == Expression::Kind::kRelGE; }; - bool canBeEmbeded2IndexScan = true; + bool canBeEmbedded2IndexScan = true; for (auto& f : filterItems) { - if (!canBeEmbeded(f->kind())) { - canBeEmbeded2IndexScan = false; + if (!canBeEmbedded(f->kind())) { + canBeEmbedded2IndexScan = false; break; } } - if (canBeEmbeded2IndexScan) { + if (canBeEmbedded2IndexScan) { auto* srcFilter = ExpressionUtils::rewriteLabelAttr2TagProp(flattenFilter); storage::cpp2::IndexQueryContext ctx; ctx.set_filter(Expression::encode(*srcFilter)); diff --git a/src/graph/planner/match/MatchSolver.cpp b/src/graph/planner/match/MatchSolver.cpp index 92e9832199d..825eb2ad1ab 100644 --- a/src/graph/planner/match/MatchSolver.cpp +++ b/src/graph/planner/match/MatchSolver.cpp @@ -143,7 +143,7 @@ Expression* 
MatchSolver::makeIndexFilter(const std::string& label, auto* right = binary->right(); const LabelAttributeExpression* la = nullptr; const ConstantExpression* constant = nullptr; - // TODO(aiee) extract the logic that apllies to both match and lookup + // TODO(aiee) extract the logic that applies to both match and lookup if (left->kind() == Expression::Kind::kLabelAttribute && right->kind() == Expression::Kind::kConstant) { la = static_cast(left); diff --git a/src/graph/planner/match/PropIndexSeek.cpp b/src/graph/planner/match/PropIndexSeek.cpp index 257f24f7e46..dfbd33f39c5 100644 --- a/src/graph/planner/match/PropIndexSeek.cpp +++ b/src/graph/planner/match/PropIndexSeek.cpp @@ -121,7 +121,7 @@ bool PropIndexSeek::matchNode(NodeContext* nodeCtx) { auto& node = *nodeCtx->info; if (node.labels.size() != 1) { // TODO multiple tag index seek need the IndexScan support - VLOG(2) << "Multple tag index seek is not supported now."; + VLOG(2) << "Multiple tag index seek is not supported now."; return false; } diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h index c0dee822263..a49ea464c53 100644 --- a/src/graph/planner/plan/Admin.h +++ b/src/graph/planner/plan/Admin.h @@ -313,7 +313,7 @@ class DropSnapshot final : public SingleDependencyNode { std::unique_ptr explain() const override; - const std::string& getShapshotName() const { return snapshotName_; } + const std::string& getSnapshotName() const { return snapshotName_; } private: explicit DropSnapshot(QueryContext* qctx, PlanNode* input, std::string snapshotName) diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index 587041ee401..f9d1bcfc4a2 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -32,7 +32,7 @@ PlanNode::PlanNode(QueryContext* qctx, Kind kind) : qctx_(qctx), kind_(kind) { const char* PlanNode::toString(PlanNode::Kind kind) { switch (kind) { case Kind::kUnknown: - return "Unkonwn"; + return "Unknown"; 
case Kind::kStart: return "Start"; case Kind::kGetNeighbors: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index 83ce1c9e1be..ed50f0a9c51 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -271,7 +271,7 @@ std::ostream& operator<<(std::ostream& os, PlanNode::Kind kind); // Dependencies will cover the inputs, For example bi input require bi // dependencies as least, but single dependencies may don't need any inputs (I.E -// admin plan node) Single dependecy without input It's useful for admin plan +// admin plan node) Single dependency without input It's useful for admin plan // node class SingleDependencyNode : public PlanNode { public: diff --git a/src/graph/planner/plan/Query.h b/src/graph/planner/plan/Query.h index 13629bf2ef5..2f74d3883d7 100644 --- a/src/graph/planner/plan/Query.h +++ b/src/graph/planner/plan/Query.h @@ -20,7 +20,7 @@ namespace nebula { namespace graph { /** - * Now we hava four kind of exploration nodes: + * Now we have four kind of exploration nodes: * GetNeighbors, * GetVertices, * GetEdges, diff --git a/src/graph/scheduler/AsyncMsgNotifyBasedScheduler.h b/src/graph/scheduler/AsyncMsgNotifyBasedScheduler.h index 90ba9a657d9..4610a5cd15e 100644 --- a/src/graph/scheduler/AsyncMsgNotifyBasedScheduler.h +++ b/src/graph/scheduler/AsyncMsgNotifyBasedScheduler.h @@ -13,7 +13,7 @@ namespace nebula { namespace graph { /** - * This is an scheluder implementation based on asynchronous message + * This is an scheduler implementation based on asynchronous message * notification and bread first search. Each node in execution plan would be * triggered to run when the node itself receives all the messages that send by * its dependencies. 
And once the node is done running, it will send a message diff --git a/src/graph/service/GraphFlags.cpp b/src/graph/service/GraphFlags.cpp index af8e641e478..c95272d563e 100644 --- a/src/graph/service/GraphFlags.cpp +++ b/src/graph/service/GraphFlags.cpp @@ -68,6 +68,6 @@ DEFINE_bool(enable_experimental_feature, false, "Whether to enable experimental DEFINE_bool(enable_client_white_list, true, "Turn on/off the client white list."); DEFINE_string(client_white_list, nebula::getOriginVersion() + ":2.5.0:2.5.1:2.6.0", - "A white list for different client versions, seperate with colon."); + "A white list for different client versions, separate with colon."); DEFINE_int32(num_rows_to_check_memory, 1024, "number rows to check memory"); diff --git a/src/graph/service/QueryEngine.cpp b/src/graph/service/QueryEngine.cpp index 4fcc8d78608..b5d47df8e3a 100644 --- a/src/graph/service/QueryEngine.cpp +++ b/src/graph/service/QueryEngine.cpp @@ -32,7 +32,7 @@ Status QueryEngine::init(std::shared_ptr ioExecutor storage_ = std::make_unique(ioExecutor, metaClient_); charsetInfo_ = CharsetInfo::instance(); - PlannersRegister::registPlanners(); + PlannersRegister::registerPlanners(); std::vector rulesets{&opt::RuleSet::DefaultRules()}; if (FLAGS_enable_optimizer) { diff --git a/src/graph/service/RequestContext.h b/src/graph/service/RequestContext.h index 2486fd72453..a6ccc688543 100644 --- a/src/graph/service/RequestContext.h +++ b/src/graph/service/RequestContext.h @@ -19,7 +19,7 @@ * 1. Create a RequestContext, with statement, session, etc. * 2. Obtain a Future from the context, which is to be returned back to the * Thrift framework. - * 3. Prepare the Response when the request is complished. + * 3. Prepare the Response when the request is completed. * 4. Call `finish' to send the response to the client. 
*/ diff --git a/src/graph/util/ExpressionUtils.cpp b/src/graph/util/ExpressionUtils.cpp index 8b32888bb7a..aff0c9f14ce 100644 --- a/src/graph/util/ExpressionUtils.cpp +++ b/src/graph/util/ExpressionUtils.cpp @@ -354,7 +354,7 @@ Expression *ExpressionUtils::reduceUnaryNotExpr(const Expression *expr) { Expression *ExpressionUtils::rewriteRelExpr(const Expression *expr) { ObjectPool *pool = expr->getObjPool(); - // Match relational expressions containing at least one airthmetic expr + // Match relational expressions containing at least one arithmetic expr auto matcher = [](const Expression *e) -> bool { if (e->isRelExpr()) { auto relExpr = static_cast(e); @@ -421,7 +421,7 @@ Expression *ExpressionUtils::rewriteRelExpr(const Expression *expr) { Expression *ExpressionUtils::rewriteRelExprHelper(const Expression *expr, Expression *&relRightOperandExpr) { ObjectPool *pool = expr->getObjPool(); - // TODO: Support rewrite mul/div expressoion after fixing overflow + // TODO: Support rewrite mul/div expression after fixing overflow auto matcher = [](const Expression *e) -> bool { if (!e->isArithmeticExpr() || e->kind() == Expression::Kind::kMultiply || e->kind() == Expression::Kind::kDivision) @@ -456,7 +456,7 @@ Expression *ExpressionUtils::rewriteRelExprHelper(const Expression *expr, case Expression::Kind::kMinus: relRightOperandExpr = ArithmeticExpression::makeMinus(pool, lexpr, rexpr); break; - // Unsupported arithm kind + // Unsupported arithmetic kind // case Expression::Kind::kMultiply: // case Expression::Kind::kDivision: default: diff --git a/src/graph/validator/AdminJobValidator.h b/src/graph/validator/AdminJobValidator.h index e576db54c20..7d755da88e7 100644 --- a/src/graph/validator/AdminJobValidator.h +++ b/src/graph/validator/AdminJobValidator.h @@ -39,7 +39,7 @@ class AdminJobValidator final : public Validator { case meta::cpp2::AdminCmd::DATA_BALANCE: case meta::cpp2::AdminCmd::LEADER_BALANCE: return true; - // TODO: Also space related, but not available in 
CreateJobExcutor now. + // TODO: Also space related, but not available in CreateJobExecutor now. case meta::cpp2::AdminCmd::DOWNLOAD: case meta::cpp2::AdminCmd::INGEST: case meta::cpp2::AdminCmd::UNKNOWN: diff --git a/src/graph/validator/FetchVerticesValidator.h b/src/graph/validator/FetchVerticesValidator.h index 476b70c37c8..88f87f6762a 100644 --- a/src/graph/validator/FetchVerticesValidator.h +++ b/src/graph/validator/FetchVerticesValidator.h @@ -21,7 +21,7 @@ class FetchVerticesValidator final : public Validator { private: Status validateImpl() override; - Status validateTag(const NameLabelList* nameLables); + Status validateTag(const NameLabelList* nameLabels); Status validateYield(YieldClause* yield); diff --git a/src/graph/validator/LookupValidator.cpp b/src/graph/validator/LookupValidator.cpp index 640dc4b02a9..d5d18e7eb4a 100644 --- a/src/graph/validator/LookupValidator.cpp +++ b/src/graph/validator/LookupValidator.cpp @@ -469,7 +469,7 @@ StatusOr LookupValidator::checkConstExpr(Expression* expr, // Check prop type if (v.type() != SchemaUtil::propTypeToValueType(type)) { - // allow diffrent types in the IN expression, such as "abc" IN ["abc"] + // allow different types in the IN expression, such as "abc" IN ["abc"] if (!expr->isContainerExpr()) { return Status::SemanticError("Column type error : %s", prop.c_str()); } diff --git a/src/graph/validator/MaintainValidator.cpp b/src/graph/validator/MaintainValidator.cpp index 7c8f9a0dcc7..73483e092ac 100644 --- a/src/graph/validator/MaintainValidator.cpp +++ b/src/graph/validator/MaintainValidator.cpp @@ -37,7 +37,7 @@ static Status validateColumns(const std::vector &columnSp column.set_nullable(property->nullable()); } else if (property->isDefaultValue()) { if (!ExpressionUtils::isEvaluableExpr(property->defaultValue())) { - return Status::SemanticError("Wrong default value experssion `%s'", + return Status::SemanticError("Wrong default value expression `%s'", property->defaultValue()->toString().c_str()); } 
auto *defaultValueExpr = property->defaultValue(); diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index c2c93d8d024..d3ca0b6f1cd 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -718,8 +718,8 @@ Status MatchValidator::validateGroup(YieldClauseContext &yieldCtx) const { yieldCtx.aggOutputColumnNames_.emplace_back(agg->toString()); } if (!aggs.empty()) { - auto *rewritedExpr = ExpressionUtils::rewriteAgg2VarProp(colExpr); - yieldCtx.projCols_->addColumn(new YieldColumn(rewritedExpr, colOldName)); + auto *rewrittenExpr = ExpressionUtils::rewriteAgg2VarProp(colExpr); + yieldCtx.projCols_->addColumn(new YieldColumn(rewrittenExpr, colOldName)); yieldCtx.projOutputColumnNames_.emplace_back(colOldName); continue; } diff --git a/src/graph/validator/Validator.h b/src/graph/validator/Validator.h index e2c3fa0ce4d..0568d0eb054 100644 --- a/src/graph/validator/Validator.h +++ b/src/graph/validator/Validator.h @@ -73,7 +73,7 @@ class Validator { void setNoSpaceRequired() { noSpaceRequired_ = true; } - // Whether require choosen space + // Whether require chosen space bool noSpaceRequired() const { return noSpaceRequired_; } const Sentence* sentence() const { return sentence_; } diff --git a/src/graph/validator/test/FetchEdgesTest.cpp b/src/graph/validator/test/FetchEdgesTest.cpp index 8ac2794ae21..5651c0b391c 100644 --- a/src/graph/validator/test/FetchEdgesTest.cpp +++ b/src/graph/validator/test/FetchEdgesTest.cpp @@ -286,10 +286,10 @@ TEST_F(FetchEdgesValidatorTest, FetchEdgesPropFailed) { ASSERT_FALSE(validate("FETCH PROP ON edge1 \"1\"->\"2\" YIELD $$.player.name")); ASSERT_FALSE(validate("FETCH PROP ON edge1 \"1\"->\"2\" YIELD $^.player.name")); - // notexist edge + // nonexistent edge ASSERT_FALSE(validate("FETCH PROP ON not_exist_edge \"1\"->\"2\" YIELD not_exist_edge.prop1")); - // notexist edge property + // nonexistent edge property ASSERT_FALSE(validate("FETCH PROP ON like 
\"1\"->\"2\" YIELD like.not_exist_prop")); // invalid yield expression diff --git a/src/graph/validator/test/FetchVerticesTest.cpp b/src/graph/validator/test/FetchVerticesTest.cpp index bfa886b81cf..52bcf7eef41 100644 --- a/src/graph/validator/test/FetchVerticesTest.cpp +++ b/src/graph/validator/test/FetchVerticesTest.cpp @@ -673,7 +673,7 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesPropFailed) { } TEST_F(FetchVerticesValidatorTest, FetchVerticesInputFailed) { - // mismatched varirable + // mismatched variable ASSERT_FALSE( validate("$a = FETCH PROP ON person \"1\" YIELD person.name AS name;" "FETCH PROP ON person $b.name YIELD vertex as node")); @@ -681,7 +681,7 @@ TEST_F(FetchVerticesValidatorTest, FetchVerticesInputFailed) { validate("$a = FETCH PROP ON * \"1\" YIELD person.name AS name;" "FETCH PROP * person $b.name YIELD vertex as node")); - // mismatched varirable property + // mismatched variable property ASSERT_FALSE( validate("$a = FETCH PROP ON person \"1\" YIELD person.name AS name;" "FETCH PROP ON person $a.not_exist_property YIELD vertex as node")); diff --git a/src/graph/validator/test/MockSchemaManager.h b/src/graph/validator/test/MockSchemaManager.h index 46b14c1eb6a..e13ef680d21 100644 --- a/src/graph/validator/test/MockSchemaManager.h +++ b/src/graph/validator/test/MockSchemaManager.h @@ -76,7 +76,7 @@ class MockSchemaManager final : public nebula::meta::SchemaManager { return allVerTagSchemas; } - // Returns all latest version of schesmas of all tags in the given space + // Returns all latest version of schemas of all tags in the given space StatusOr getAllLatestVerTagSchema(GraphSpaceID space) override { meta::TagSchema allLatestVerTagSchemas; const auto& tagSchemas = tagSchemas_[space]; diff --git a/src/graph/validator/test/MutateValidatorTest.cpp b/src/graph/validator/test/MutateValidatorTest.cpp index cb8fd8a371a..add40824ae8 100644 --- a/src/graph/validator/test/MutateValidatorTest.cpp +++ 
b/src/graph/validator/test/MutateValidatorTest.cpp @@ -201,13 +201,13 @@ TEST_F(MutateValidatorTest, UpdateEdgeTest) { auto cmd = "UPDATE EDGE ON study \"Tom\"->\"Lily\" SET count = 1"; ASSERT_FALSE(checkResult(cmd, {})); } - // Wrong expr "$^.peson.age" + // Wrong expr "$^.person_.age" { auto cmd = "UPDATE EDGE \"Tom\"->\"Lily\" OF like " "SET end = like.end + 1 " - "WHEN $^.peson.age >= 18 " - "YIELD $^.peson.age AS age, like.end AS end"; + "WHEN $^.person_.age >= 18 " + "YIELD $^.person_.age AS age, like.end AS end"; ASSERT_FALSE(checkResult(cmd, {})); } // 1.0 syntax succeed diff --git a/src/graph/validator/test/QueryValidatorTest.cpp b/src/graph/validator/test/QueryValidatorTest.cpp index eaa3c449a1c..6af43b84454 100644 --- a/src/graph/validator/test/QueryValidatorTest.cpp +++ b/src/graph/validator/test/QueryValidatorTest.cpp @@ -1015,7 +1015,7 @@ TEST_F(QueryValidatorTest, OrderBy) { } } -TEST_F(QueryValidatorTest, OrderByAndLimt) { +TEST_F(QueryValidatorTest, OrderByAndLimit) { { std::string query = "GO FROM \"Ann\" OVER like YIELD $^.person.age AS age" diff --git a/src/graph/validator/test/ValidatorTestBase.cpp b/src/graph/validator/test/ValidatorTestBase.cpp index 6ed2caa50ba..a76c3f89227 100644 --- a/src/graph/validator/test/ValidatorTestBase.cpp +++ b/src/graph/validator/test/ValidatorTestBase.cpp @@ -195,7 +195,7 @@ Status ValidatorTestBase::EqSelf(const PlanNode *l, const PlanNode *r) { // V // E // this will traversal sub-tree [D->E] twice but not matter the Equal result -// TODO(shylock) maybe need check the toplogy of `Select` and `Loop` +// TODO(shylock) maybe need check the topology of `Select` and `Loop` /*static*/ Status ValidatorTestBase::Eq(const PlanNode *l, const PlanNode *r) { auto result = EqSelf(l, r); if (!result.ok()) { diff --git a/src/graph/validator/test/ValidatorTestBase.h b/src/graph/validator/test/ValidatorTestBase.h index 14c7abe5a8a..38ccf3fbb75 100644 --- a/src/graph/validator/test/ValidatorTestBase.h +++ 
b/src/graph/validator/test/ValidatorTestBase.h @@ -41,7 +41,7 @@ class ValidatorTestBase : public ::testing::Test { schemaMng_ = CHECK_NOTNULL(MockSchemaManager::makeUnique()); indexMng_ = CHECK_NOTNULL(MockIndexManager::makeUnique()); pool_ = std::make_unique(); - PlannersRegister::registPlanners(); + PlannersRegister::registerPlanners(); } StatusOr validate(const std::string& query) { diff --git a/src/graph/validator/test/YieldValidatorTest.cpp b/src/graph/validator/test/YieldValidatorTest.cpp index ed1ec6070e8..1dec793100b 100644 --- a/src/graph/validator/test/YieldValidatorTest.cpp +++ b/src/graph/validator/test/YieldValidatorTest.cpp @@ -92,7 +92,7 @@ TEST_F(YieldValidatorTest, Logic) { #endif } -TEST_F(YieldValidatorTest, FuncitonCall) { +TEST_F(YieldValidatorTest, FunctionCall) { #if 0 { // TODO not support udf_is_in diff --git a/src/graph/visitor/ExtractPropExprVisitor.h b/src/graph/visitor/ExtractPropExprVisitor.h index 483c68640b5..5eda485de80 100644 --- a/src/graph/visitor/ExtractPropExprVisitor.h +++ b/src/graph/visitor/ExtractPropExprVisitor.h @@ -2,8 +2,8 @@ * * This source code is licensed under Apache 2.0 License. 
*/ -#ifndef GRAPH_VISITOR_EXTRACTPROPEXPRVISITON_H_ -#define GRAPH_VISITOR_EXTRACTPROPEXPRVISITON_H_ +#ifndef GRAPH_VISITOR_EXTRACTPROPEXPRVISITOR_H_ +#define GRAPH_VISITOR_EXTRACTPROPEXPRVISITOR_H_ #include "graph/context/ValidateContext.h" #include "graph/visitor/ExprVisitorImpl.h" @@ -74,4 +74,4 @@ class ExtractPropExprVisitor final : public ExprVisitorImpl { } // namespace graph } // namespace nebula -#endif // GRAPH_VISITOR_EXTRACTPROPEXPRVISITON_H_ +#endif // GRAPH_VISITOR_EXTRACTPROPEXPRVISITOR_H_ diff --git a/src/graph/visitor/FoldConstantExprVisitor.cpp b/src/graph/visitor/FoldConstantExprVisitor.cpp index e8cf157fe14..dd970696774 100644 --- a/src/graph/visitor/FoldConstantExprVisitor.cpp +++ b/src/graph/visitor/FoldConstantExprVisitor.cpp @@ -337,7 +337,7 @@ void FoldConstantExprVisitor::visitBinaryExpr(BinaryExpression *expr) { } Expression *FoldConstantExprVisitor::fold(Expression *expr) { - // Container expresison should remain the same type after being folded + // Container expression should remain the same type after being folded if (expr->isContainerExpr()) { return expr; } diff --git a/src/graph/visitor/FoldConstantExprVisitor.h b/src/graph/visitor/FoldConstantExprVisitor.h index 9a913a31ca4..0aa77b798d4 100644 --- a/src/graph/visitor/FoldConstantExprVisitor.h +++ b/src/graph/visitor/FoldConstantExprVisitor.h @@ -79,7 +79,7 @@ class FoldConstantExprVisitor final : public ExprVisitor { Expression *fold(Expression *expr); private: - // Obejct pool used to manage expressions generated during visiting + // Object pool used to manage expressions generated during visiting ObjectPool *pool_; bool canBeFolded_{false}; Status status_; diff --git a/src/graph/visitor/test/RewriteRelExprVisitorTest.cpp b/src/graph/visitor/test/RewriteRelExprVisitorTest.cpp index 13cffb278a4..1ca02836784 100644 --- a/src/graph/visitor/test/RewriteRelExprVisitorTest.cpp +++ b/src/graph/visitor/test/RewriteRelExprVisitorTest.cpp @@ -35,7 +35,7 @@ TEST_F(RewriteRelExprVisitorTest, 
TestArithmeticalExpr) { auto expected = ltExpr(laExpr("v", "age"), minusExpr(constantExpr(40), constantExpr(-1))); ASSERT_EQ(*res, *expected) << res->toString() << " vs. " << expected->toString(); } - // (label1 + label2 < 40) => (label1 + label2 < 40) Unchaged + // (label1 + label2 < 40) => (label1 + label2 < 40) Unchanged // TODO: replace list with set in object pool and avoid copy { auto expr = ltExpr(addExpr(laExpr("v", "age"), laExpr("v2", "age2")), constantExpr(40)); diff --git a/src/graph/visitor/test/RewriteUnaryNotExprVisitorTest.cpp b/src/graph/visitor/test/RewriteUnaryNotExprVisitorTest.cpp index c05a6744454..9704a5b6cfc 100644 --- a/src/graph/visitor/test/RewriteUnaryNotExprVisitorTest.cpp +++ b/src/graph/visitor/test/RewriteUnaryNotExprVisitorTest.cpp @@ -105,7 +105,7 @@ TEST_F(RewriteUnaryNotExprVisitorTest, TestMultipleUnaryNotContainerExpr) { TEST_F(RewriteUnaryNotExprVisitorTest, TestRelExpr) { // (5 == 10) => (5 == 10) - // no change should be made to the orginal expression + // no change should be made to the original expression { auto original = eqExpr(constantExpr(5), constantExpr(10)); auto res = ExpressionUtils::reduceUnaryNotExpr(original); diff --git a/src/graph/visitor/test/VisitorTestBase.h b/src/graph/visitor/test/VisitorTestBase.h index 76991d8a11d..003bf7ccd61 100644 --- a/src/graph/visitor/test/VisitorTestBase.h +++ b/src/graph/visitor/test/VisitorTestBase.h @@ -153,14 +153,14 @@ class VisitorTestBase : public ::testing::Test { } CaseExpression *caseExpr(Expression *cond, - Expression *defaltResult, + Expression *defaultResult, Expression *when, Expression *then) { auto caseList = CaseList::make(pool); caseList->add(when, then); auto expr = CaseExpression::make(pool, caseList); expr->setCondition(cond); - expr->setDefault(defaltResult); + expr->setDefault(defaultResult); return expr; } diff --git a/src/interface/common.thrift b/src/interface/common.thrift index 3636deec436..abcc25faeca 100644 --- a/src/interface/common.thrift +++ 
b/src/interface/common.thrift @@ -269,7 +269,7 @@ enum PropertyType { FLOAT = 4, DOUBLE = 5, STRING = 6, - // String with fixed length. If the string content is shorteri + // String with fixed length. If the string content is shorter // than the given length, '\0' will be padded to the end FIXED_STRING = 7, // New in v2 INT8 = 8, // New in v2 @@ -361,7 +361,7 @@ enum ErrorCode { E_BALANCED = -2024, E_NO_RUNNING_BALANCE_PLAN = -2025, E_NO_VALID_HOST = -2026, - E_CORRUPTTED_BALANCE_PLAN = -2027, + E_CORRUPTED_BALANCE_PLAN = -2027, E_NO_INVALID_BALANCE_PLAN = -2028, diff --git a/src/interface/storage.thrift b/src/interface/storage.thrift index e4ff187305c..3451fd8b8e2 100644 --- a/src/interface/storage.thrift +++ b/src/interface/storage.thrift @@ -30,7 +30,7 @@ struct RequestCommon { struct PartitionResult { 1: required common.ErrorCode code, 2: required common.PartitionID part_id, - // Only valid when code is E_LEADER_CHANAGED. + // Only valid when code is E_LEADER_CHANGED. 3: optional common.HostAddr leader, } @@ -63,7 +63,7 @@ enum StatType { struct StatProp { // Alias of the stats property 1: binary alias, - // An eperssion. In most of cases, it is a reference to a specific property + // An expression. In most of cases, it is a reference to a specific property 2: binary prop, // Stats method 3: StatType stat, @@ -74,7 +74,7 @@ struct StatProp { struct Expr { // Alias of the expression 1: binary alias, - // An eperssion. It could be any valid expression, + // An expression. 
It could be any valid expression, 2: binary expr, } @@ -127,7 +127,7 @@ enum EdgeDirection { struct TraverseSpec { // When edge_type > 0, going along the out-edge, otherwise, along the in-edge - // If the edge type list is empty, all edges will be scaned + // If the edge type list is empty, all edges will be scanned 1: list edge_types, // When above edge_types is not empty, edge_direction should be ignored // When edge_types is empty, edge_direction decided which edge types will be @@ -156,7 +156,7 @@ struct TraverseSpec { 9: optional bool random, // Return the top/bottom N rows for each given vertex 10: optional i64 limit, - // If provided, only the rows satified the given expression will be returned + // If provided, only the rows satisfied the given expression will be returned 11: optional binary filter, } @@ -286,7 +286,7 @@ struct GetPropResponse { // | ..... | // ==================================== // - // Each column represents one peoperty. the column name is in the form of "tag_name.prop_alias" + // Each column represents one property. the column name is in the form of "tag_name.prop_alias" // or "edge_type_name.prop_alias" in the same order which specified in VertexProp or EdgeProp // // If the request is to get tag prop, the first column will **always** be the vid, @@ -341,7 +341,7 @@ struct AddVerticesRequest { // in the NewVertex.NewTag.props 3: map> (cpp.template = "std::unordered_map") prop_names, - // if ture, when (vertexID,tagID) already exists, do nothing + // if true, when (vertexID,tagID) already exists, do nothing 4: bool if_not_exists, 5: optional RequestCommon common, } @@ -354,7 +354,7 @@ struct AddEdgesRequest { // A list of property names. 
The order of the property names should match // the data order specified in the NewEdge.props 3: list prop_names, - // if ture, when edge already exists, do nothing + // if true, when edge already exists, do nothing 4: bool if_not_exists, 5: optional RequestCommon common, } @@ -407,7 +407,7 @@ struct UpdateResponse { // The name of the first column is "_inserted". It has a boolean value. It's // TRUE if insertion happens // Starting from the second column, it's the all returned properties, one column - // per peoperty. If there is no given property, the value will be a NULL + // per property. If there is no given property, the value will be a NULL 2: optional common.DataSet props, } @@ -489,7 +489,7 @@ struct LookupIndexResp { // properties; when looking up the edge index, each row represents one edge // and its properties. // - // Each column represents one peoperty. the column name is in the form of "tag_name.prop_alias" + // Each column represents one property. the column name is in the form of "tag_name.prop_alias" // or "edge_type_name.prop_alias" in the same order which specified in return_columns of request 2: optional common.DataSet data, } @@ -618,7 +618,7 @@ struct ScanEdgeResponse { 1: required ResponseCommon result, // The data will return as a dataset. The format is as follows: // Each column represents one property. the column name is in the form of "edge_name.prop_alias" - // in the same order which specified in EdgeProp in requesss. + // in the same order which specified in EdgeProp in requests. 
2: common.DataSet edge_data, 3: map (cpp.template = "std::unordered_map") cursors; @@ -627,11 +627,11 @@ struct ScanEdgeResponse { struct TaskPara { 1: common.GraphSpaceID space_id, 2: optional list parts, - 3: optional list task_specfic_paras + 3: optional list task_specific_paras } struct AddAdminTaskRequest { - // rebuild index / flush / compact / statis + // rebuild index / flush / compact / stats 1: meta.AdminCmd cmd 2: i32 job_id 3: i32 task_id @@ -889,7 +889,7 @@ struct ChainAddEdgesRequest { // A list of property names. The order of the property names should match // the data order specified in the NewEdge.props 3: list prop_names, - // if ture, when edge already exists, do nothing + // if true, when edge already exists, do nothing 4: bool if_not_exists, // 5: map term_of_parts, 5: i64 term diff --git a/src/kvstore/DiskManager.h b/src/kvstore/DiskManager.h index 8e00689dc4e..4c0afe3c9a1 100644 --- a/src/kvstore/DiskManager.h +++ b/src/kvstore/DiskManager.h @@ -35,8 +35,8 @@ class DiskManager { // Canonical path which contains the specified space and part, e.g. // "/DataPath/nebula/spaceId". As for one storage instance, at most one path - // should contain a parition. Note that there isn't a separate dir for a - // parititon (except wal), so we return space dir + // should contain a partition. 
Note that there isn't a separate dir for a + // partition (except wal), so we return space dir StatusOr path(GraphSpaceID spaceId, PartitionID partId); // pre-condition: path is the space path, so it must end with /nebula/spaceId diff --git a/src/kvstore/Listener.cpp b/src/kvstore/Listener.cpp index 7e0dd60bac3..f99ad2476e7 100644 --- a/src/kvstore/Listener.cpp +++ b/src/kvstore/Listener.cpp @@ -191,7 +191,7 @@ void Listener::doApply() { case OP_BATCH_WRITE: { auto batch = decodeBatchValue(log); for (auto& op : batch) { - // OP_BATCH_PUT and OP_BATCH_REMOVE_RANGE is igored + // OP_BATCH_PUT and OP_BATCH_REMOVE_RANGE is ignored if (op.first == BatchLogType::OP_BATCH_PUT) { data.emplace_back(op.second.first, op.second.second); } diff --git a/src/kvstore/NebulaStore.cpp b/src/kvstore/NebulaStore.cpp index adcf2d762e5..e9675500ac9 100644 --- a/src/kvstore/NebulaStore.cpp +++ b/src/kvstore/NebulaStore.cpp @@ -22,7 +22,7 @@ DEFINE_int32(custom_filter_interval_secs, "interval to trigger custom compaction, < 0 means always do " "default minor compaction"); DEFINE_int32(num_workers, 4, "Number of worker threads"); -DEFINE_int32(clean_wal_interval_secs, 600, "inerval to trigger clean expired wal"); +DEFINE_int32(clean_wal_interval_secs, 600, "interval to trigger clean expired wal"); DEFINE_bool(auto_remove_invalid_space, false, "whether remove data of invalid space when restart"); DECLARE_bool(rocksdb_disable_wal); @@ -558,7 +558,7 @@ void NebulaStore::removeSpaceDir(const std::string& dir) { LOG(INFO) << "Try to remove space directory: " << dir; boost::filesystem::remove_all(dir); } catch (const boost::filesystem::filesystem_error& e) { - LOG(ERROR) << "Exception caught while remove directory, please delelte it by manual: " + LOG(ERROR) << "Exception caught while remove directory, please delete it by manual: " << e.what(); } } diff --git a/src/kvstore/Part.cpp b/src/kvstore/Part.cpp index 8d8526904c1..f3f2830fcd4 100644 --- a/src/kvstore/Part.cpp +++ b/src/kvstore/Part.cpp 
@@ -201,9 +201,9 @@ void Part::onLeaderReady(TermID term) { } } -void Part::registerOnLeaderReady(LeaderChagneCB cb) { leaderReadyCB_.emplace_back(std::move(cb)); } +void Part::registerOnLeaderReady(LeaderChangeCB cb) { leaderReadyCB_.emplace_back(std::move(cb)); } -void Part::registerOnLeaderLost(LeaderChagneCB cb) { leaderLostCB_.emplace_back(std::move(cb)); } +void Part::registerOnLeaderLost(LeaderChangeCB cb) { leaderLostCB_.emplace_back(std::move(cb)); } void Part::onDiscoverNewLeader(HostAddr nLeader) { LOG(INFO) << idStr_ << "Find the new leader " << nLeader; diff --git a/src/kvstore/Part.h b/src/kvstore/Part.h index 0af34aba900..c11a43ba4e7 100644 --- a/src/kvstore/Part.h +++ b/src/kvstore/Part.h @@ -115,18 +115,18 @@ class Part : public raftex::RaftPart { TermID term; }; - using LeaderChagneCB = std::function; - void registerOnLeaderReady(LeaderChagneCB cb); + using LeaderChangeCB = std::function; + void registerOnLeaderReady(LeaderChangeCB cb); - void registerOnLeaderLost(LeaderChagneCB cb); + void registerOnLeaderLost(LeaderChangeCB cb); protected: GraphSpaceID spaceId_; PartitionID partId_; std::string walPath_; NewLeaderCallback newLeaderCb_ = nullptr; - std::vector leaderReadyCB_; - std::vector leaderLostCB_; + std::vector leaderReadyCB_; + std::vector leaderLostCB_; private: KVEngine* engine_ = nullptr; diff --git a/src/kvstore/RateLimiter.h b/src/kvstore/RateLimiter.h index 5f0e341a6ad..727049b4bf9 100644 --- a/src/kvstore/RateLimiter.h +++ b/src/kvstore/RateLimiter.h @@ -16,7 +16,7 @@ DECLARE_bool(skip_wait_in_rate_limiter); namespace nebula { namespace kvstore { -// A simple wrapper for foly::TokenBucket, it would limit the speed to rate_ * buckets_.size(). +// A simple wrapper for folly::TokenBucket, it would limit the speed to rate_ * buckets_.size(). // For now, there are two major cases: snapshot (both for balance or catch up) and rebuild index. 
class RateLimiter { public: @@ -27,15 +27,15 @@ class RateLimiter { bucket_.reset(new folly::DynamicTokenBucket(static_cast(now + waitInSec))); } - // Caller must make sure the **the parition has been add, and won't be removed during consume.** - // Snaphot and rebuild index follow this principle by design. + // Caller must make sure the **the partition has been add, and won't be removed during consume.** + // Snapshot and rebuild index follow this principle by design. void consume(double toConsume, double rate, double burstSize) { if (toConsume > burstSize) { // consumeWithBorrowAndWait do nothing when toConsume > burstSize_, we sleep 1s instead std::this_thread::sleep_for(std::chrono::seconds(1)); } else { - // If there are enouth tokens, consume and return immediately. - // If not, cosume anyway, but sleep enough time before return. + // If there are enough tokens, consume and return immediately. + // If not, consume anyway, but sleep enough time before return. auto now = time::WallClock::fastNowInSec(); bucket_->consumeWithBorrowAndWait(toConsume, rate, burstSize, static_cast(now)); } diff --git a/src/kvstore/RocksEngine.cpp b/src/kvstore/RocksEngine.cpp index c3af0a8a3fe..e386743c415 100644 --- a/src/kvstore/RocksEngine.cpp +++ b/src/kvstore/RocksEngine.cpp @@ -213,7 +213,7 @@ nebula::cpp2::ErrorCode RocksEngine::range(const std::string& start, nebula::cpp2::ErrorCode RocksEngine::prefix(const std::string& prefix, std::unique_ptr* storageIter) { // In fact, we don't need to check prefix.size() >= extractorLen_, which is caller's duty to make - // sure the prefix bloom filter exists. But this is quite error-proning, so we do a check here. + // sure the prefix bloom filter exists. But this is quite error-prone, so we do a check here. 
if (FLAGS_enable_rocksdb_prefix_filtering && prefix.size() >= extractorLen_) { return prefixWithExtractor(prefix, storageIter); } else { @@ -517,7 +517,7 @@ void RocksEngine::openBackupEngine(GraphSpaceID spaceId) { } else if (!status.ok()) { LOG(FATAL) << status.ToString(); } - LOG(INFO) << "restore from latest backup succesfully" + LOG(INFO) << "restore from latest backup successfully" << ", backup path " << backupPath_ << ", wal path " << walDir << ", data path " << dataPath; } diff --git a/src/kvstore/RocksEngine.h b/src/kvstore/RocksEngine.h index 5542ab89784..06226b5789c 100644 --- a/src/kvstore/RocksEngine.h +++ b/src/kvstore/RocksEngine.h @@ -120,7 +120,7 @@ class RocksEngine : public KVEngine { void stop() override; - // return path to a spaceId, e.g. "/DataPath/nebula/spaceId", usally it should + // return path to a spaceId, e.g. "/DataPath/nebula/spaceId", usually it should // contain two subdir: data and wal. const char* getDataRoot() const override { return dataPath_.c_str(); } diff --git a/src/kvstore/plugins/elasticsearch/ESListener.cpp b/src/kvstore/plugins/elasticsearch/ESListener.cpp index b8f8702018f..767298d7dbf 100644 --- a/src/kvstore/plugins/elasticsearch/ESListener.cpp +++ b/src/kvstore/plugins/elasticsearch/ESListener.cpp @@ -74,7 +74,7 @@ bool ESListener::persist(LogID lastId, TermID lastTerm, LogID lastApplyLogId) { std::pair ESListener::lastCommittedLogId() { if (access(lastApplyLogFile_->c_str(), 0) != 0) { - VLOG(3) << "Invalid or non-existent file : " << *lastApplyLogFile_; + VLOG(3) << "Invalid or nonexistent file : " << *lastApplyLogFile_; return {0, 0}; } int32_t fd = open(lastApplyLogFile_->c_str(), O_RDONLY); @@ -97,7 +97,7 @@ std::pair ESListener::lastCommittedLogId() { LogID ESListener::lastApplyLogId() { if (access(lastApplyLogFile_->c_str(), 0) != 0) { - VLOG(3) << "Invalid or non-existent file : " << *lastApplyLogFile_; + VLOG(3) << "Invalid or nonexistent file : " << *lastApplyLogFile_; return 0; } int32_t fd = 
open(lastApplyLogFile_->c_str(), O_RDONLY); diff --git a/src/kvstore/plugins/hbase/HBaseStore.h b/src/kvstore/plugins/hbase/HBaseStore.h index f137f9f8352..3ceff2be77c 100644 --- a/src/kvstore/plugins/hbase/HBaseStore.h +++ b/src/kvstore/plugins/hbase/HBaseStore.h @@ -157,11 +157,11 @@ class HBaseStore : public KVStore { KVCallback cb); void asyncAtomicOp(GraphSpaceID, PartitionID, raftex::AtomicOp, KVCallback) override { - LOG(FATAL) << "Not supportted yet!"; + LOG(FATAL) << "Not supported yet!"; } void asyncAtomicOp(GraphSpaceID, PartitionID, std::string&& multiValues, KVCallback) override { - LOG(FATAL) << "Not supportted yet!"; + LOG(FATAL) << "Not supported yet!"; } ResultCode ingest(GraphSpaceID spaceId) override; diff --git a/src/kvstore/plugins/hbase/hbase.thrift b/src/kvstore/plugins/hbase/hbase.thrift index cfd49df60b9..710a325127d 100644 --- a/src/kvstore/plugins/hbase/hbase.thrift +++ b/src/kvstore/plugins/hbase/hbase.thrift @@ -398,7 +398,7 @@ service THBaseService { 4: binary qualifier, /** the expected value, if not provided the - check is for the non-existence of the + check is for the nonexistence of the column in question */ 5: binary value, @@ -466,7 +466,7 @@ service THBaseService { 4: binary qualifier, /** the expected value, if not provided the - check is for the non-existence of the + check is for the nonexistence of the column in question */ 5: binary value, @@ -613,7 +613,7 @@ service THBaseService { 5: TCompareOp compareOp, /** the expected value to be compared against, if not provided the - check is for the non-existence of the column in question */ + check is for the nonexistence of the column in question */ 6: binary value, /** row mutations to execute if the value matches */ diff --git a/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp b/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp index d4903503875..cf62f839f22 100644 --- a/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp +++ b/src/kvstore/plugins/hbase/test/HBaseStoreTest.cpp 
@@ -38,11 +38,11 @@ TEST(HBaseStoreTest, SimpleTest) { EdgeVerPlaceHolder edgeVersion = 1; std::vector edgeKeys; std::vector edgeData; - auto edgeScheam = sm->getEdgeSchema(spaceId, edgeType, edgeVersion); + auto edgeSchema = sm->getEdgeSchema(spaceId, edgeType, edgeVersion); for (auto vertexId = srcId; vertexId < dstId; vertexId++) { auto edgeKey = NebulaKeyUtils::edgeKey(partId, srcId, edgeType, rank, vertexId); edgeKeys.emplace_back(edgeKey); - RowWriter edgeWriter(edgeScheam); + RowWriter edgeWriter(edgeSchema); for (int32_t iInt = 0; iInt < 10; iInt++) { edgeWriter << iInt; } @@ -53,11 +53,11 @@ TEST(HBaseStoreTest, SimpleTest) { edgeData.emplace_back(edgeKey, edgeValue); } - edgeScheam = sm->getEdgeSchema(spaceId, edgeType + 1, edgeVersion); + edgeSchema = sm->getEdgeSchema(spaceId, edgeType + 1, edgeVersion); for (; edgeVersion < 10L; edgeVersion++) { auto edgeKey = NebulaKeyUtils::edgeKey(partId, srcId, edgeType + 1, rank, dstId); edgeKeys.emplace_back(edgeKey); - RowWriter edgeWriter(edgeScheam); + RowWriter edgeWriter(edgeSchema); for (int32_t iInt = 0; iInt < 5; iInt++) { edgeWriter << iInt; } diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index dbceffd4bb9..17b666488cb 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -663,7 +663,7 @@ folly::Future RaftPart::appendLogAsync(ClusterID source, } if (!checkAppendLogResult(res)) { - // Mosy likely failed because the parttion is not leader + // Mosy likely failed because the partition is not leader LOG_EVERY_N(WARNING, 1000) << idStr_ << "Cannot append logs, clean the buffer"; return res; } @@ -699,7 +699,7 @@ void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) { VLOG(2) << idStr_ << "Ready to append logs from id " << iter.logId() << " (Current term is " << currTerm << ")"; } else { - LOG(ERROR) << idStr_ << "Only happend when Atomic op failed"; + LOG(ERROR) << idStr_ << "Only happened when Atomic op failed"; 
replicatingLogs_ = false; return; } @@ -1071,9 +1071,9 @@ folly::Future RaftPart::leaderElection() { // and need the snapshot from B. Meanwhile C begin the election, // C will be Candidate, but because C is in WAITING_SNAPSHOT, // so prepareElectionRequest will return false and go on the election. - // Becasue C is in Candidate, so it will reject the snapshot request from B. + // Because C is in Candidate, so it will reject the snapshot request from B. // Infinite loop begins. - // So we neeed to go back to the follower state to avoid the case. + // So we need to go back to the follower state to avoid the case. std::lock_guard g(raftLock_); role_ = Role::FOLLOWER; inElection_ = false; @@ -1190,7 +1190,7 @@ void RaftPart::statusPolling(int64_t startTime) { VLOG(2) << idStr_ << "Stop the election"; } else { // No leader has been elected, need to continue - // (After sleeping a random period betwen [500ms, 2s]) + // (After sleeping a random period between [500ms, 2s]) VLOG(2) << idStr_ << "Wait for a while and continue the leader election"; delay = (folly::Random::rand32(1500) + 500) * weight_; } @@ -1239,7 +1239,7 @@ bool RaftPart::needToCleanWal() { void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, cpp2::AskForVoteResponse& resp) { - LOG(INFO) << idStr_ << "Recieved a VOTING request" + LOG(INFO) << idStr_ << "Received a VOTING request" << ": space = " << req.get_space() << ", partition = " << req.get_part() << ", candidateAddr = " << req.get_candidate_addr() << ":" << req.get_candidate_port() << ", term = " << req.get_term() << ", lastLogId = " << req.get_last_log_id() @@ -1539,7 +1539,7 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, } else if (code == nebula::cpp2::ErrorCode::E_WRITE_STALLED) { VLOG(1) << idStr_ << "Follower delay committing log " << committedLogId_ + 1 << " to " << lastLogIdCanCommit; - // Even if log is not applied to state machine, still regard as succeded: + // Even if log is not applied 
to state machine, still regard as succeeded: // 1. As a follower, upcoming request will try to commit them // 2. If it is elected as leader later, it will try to commit them as well resp.set_committed_log_id(committedLogId_); @@ -1574,7 +1574,7 @@ cpp2::ErrorCode RaftPart::verifyLeader(const REQ& req) { << ". The local term is " << term_ << ". The remote term is not newer"; return cpp2::ErrorCode::E_TERM_OUT_OF_DATE; } else if (req.get_current_term() > term_) { - // Leader stickness, no matter the term in Request is larger or not. + // Leader stickiness, no matter the term in Request is larger or not. // TODO(heng) Maybe we should reconsider the logic if (leader_ != HostAddr("", 0) && leader_ != candidate && lastMsgRecvDur_.elapsedInMSec() < FLAGS_raft_heartbeat_interval_secs * 1000) { diff --git a/src/kvstore/raftex/RaftPart.h b/src/kvstore/raftex/RaftPart.h index 6454ee96d9b..a67fc0123c6 100644 --- a/src/kvstore/raftex/RaftPart.h +++ b/src/kvstore/raftex/RaftPart.h @@ -212,7 +212,7 @@ class RaftPart : public std::enable_shared_from_this { bool needToCleanWal(); - // leader + follwers + // leader + followers std::vector peers() const; std::set listeners() const; @@ -303,7 +303,7 @@ class RaftPart : public std::enable_shared_from_this { private: // A list of // idx -- the index of the peer - // resp -- coresponding response of peer[index] + // resp -- corresponding response of peer[index] using ElectionResponses = std::vector>; using AppendLogResponses = std::vector>; using HeartbeatResponses = std::vector>; diff --git a/src/kvstore/raftex/test/LeaderElectionTest.cpp b/src/kvstore/raftex/test/LeaderElectionTest.cpp index 0b024924092..d5fda35ba98 100644 --- a/src/kvstore/raftex/test/LeaderElectionTest.cpp +++ b/src/kvstore/raftex/test/LeaderElectionTest.cpp @@ -110,7 +110,7 @@ TEST(LeaderElection, LeaderCrash) { services[idx]->addPartition(copies.back()); copies.back()->start(getPeers(allHosts, allHosts[idx])); - // Wait untill all copies agree on the same leader + 
// Wait until all copies agree on the same leader waitUntilLeaderElected(copies, leader); // Check all hosts agree on the same leader diff --git a/src/kvstore/raftex/test/LogAppendTest.cpp b/src/kvstore/raftex/test/LogAppendTest.cpp index 5b2396a29bd..4e3fc92d791 100644 --- a/src/kvstore/raftex/test/LogAppendTest.cpp +++ b/src/kvstore/raftex/test/LogAppendTest.cpp @@ -91,7 +91,7 @@ TEST(LogAppend, MultiThreadAppend) { if (fut.isReady() && fut.value() == AppendLogResult::E_BUFFER_OVERFLOW) { LOG(FATAL) << "Should not reach here"; } else if (j == numLogs) { - // Only wait on the last log messaage + // Only wait on the last log message ASSERT_EQ(AppendLogResult::SUCCEEDED, std::move(fut).get()); } break; diff --git a/src/kvstore/raftex/test/RaftCase.cpp b/src/kvstore/raftex/test/RaftCase.cpp index 89f5f00414f..42855cf0d81 100644 --- a/src/kvstore/raftex/test/RaftCase.cpp +++ b/src/kvstore/raftex/test/RaftCase.cpp @@ -38,7 +38,7 @@ TEST_F(ThreeRaftTest, LeaderCrashReboot) { size_t idx = leader_->index(); killOneCopy(services_, copies_, leader_, idx); - // Wait untill all copies agree on the same leader_ + // Wait until all copies agree on the same leader_ waitUntilLeaderElected(copies_, leader_); // Check all hosts agree on the same leader_ checkLeadership(copies_, leader_); @@ -112,7 +112,7 @@ TEST_F(ThreeRaftTest, LeaderCrashRebootWithLogs) { killOneCopy(services_, copies_, leader_, leader_->index()); LOG(INFO) << "=====> Wait until leader of term 2 elected"; - // Wait untill all copies agree on the same leader_ + // Wait until all copies agree on the same leader_ waitUntilLeaderElected(copies_, leader_); auto leader2 = leader_; ASSERT_NE(leader1, leader2); diff --git a/src/kvstore/raftex/test/RaftexTestBase.cpp b/src/kvstore/raftex/test/RaftexTestBase.cpp index 3234644ec1c..5b9589d39c8 100644 --- a/src/kvstore/raftex/test/RaftexTestBase.cpp +++ b/src/kvstore/raftex/test/RaftexTestBase.cpp @@ -198,7 +198,7 @@ void setupRaft(int32_t numCopies, 
copies.back()->start(getPeers(allHosts, allHosts[i], isLearner), isLearner[i]); } - // Wait untill all copies agree on the same leader + // Wait until all copies agree on the same leader waitUntilLeaderElected(copies, leader, isLearner); } @@ -282,7 +282,7 @@ bool checkConsensus(std::vector>& copies, std::vector& msgs) { int32_t count = 0; for (; count < 3; count++) { - bool concensus = true; + bool consensus = true; // Sleep a while to make sure the last log has been committed on followers sleep(FLAGS_raft_heartbeat_interval_secs); @@ -290,12 +290,12 @@ bool checkConsensus(std::vector>& copies, for (auto& c : copies) { if (c != nullptr && c->isRunning()) { if (msgs.size() != c->getNumLogs() || !checkLog(c, start, end, msgs)) { - concensus = false; + consensus = false; break; } } } - if (concensus == true) { + if (consensus == true) { return true; } } diff --git a/src/kvstore/test/LogEncoderTest.cpp b/src/kvstore/test/LogEncoderTest.cpp index 02ee5753c86..f57e1a9d61e 100644 --- a/src/kvstore/test/LogEncoderTest.cpp +++ b/src/kvstore/test/LogEncoderTest.cpp @@ -134,17 +134,17 @@ TEST(LogEncoderTest, BatchTest) { auto encoded = encodeBatchValue(helper->getBatch()); auto decoded = decodeBatchValue(encoded.c_str()); - std::vector>> expectd; - expectd.emplace_back(OP_BATCH_REMOVE, - std::pair("remove", "")); - expectd.emplace_back(OP_BATCH_PUT, - std::pair("put_key", "put_value")); - expectd.emplace_back(OP_BATCH_REMOVE_RANGE, - std::pair("begin", "end")); - expectd.emplace_back( + std::vector>> expected; + expected.emplace_back(OP_BATCH_REMOVE, + std::pair("remove", "")); + expected.emplace_back(OP_BATCH_PUT, + std::pair("put_key", "put_value")); + expected.emplace_back(OP_BATCH_REMOVE_RANGE, + std::pair("begin", "end")); + expected.emplace_back( OP_BATCH_PUT, std::pair("put_key_again", "put_value_again")); - ASSERT_EQ(expectd, decoded); + ASSERT_EQ(expected, decoded); } } // namespace kvstore diff --git a/src/kvstore/test/NebulaListenerTest.cpp 
b/src/kvstore/test/NebulaListenerTest.cpp index c02e925beb6..7cfe56cb4bd 100644 --- a/src/kvstore/test/NebulaListenerTest.cpp +++ b/src/kvstore/test/NebulaListenerTest.cpp @@ -227,7 +227,7 @@ class ListenerBasicTest : public ::testing::TestWithParamstart(std::move(raftPeers)); listeners_[index]->spaceListeners_[spaceId_]->listeners_[partId].emplace( meta::cpp2::ListenerType::UNKNOWN, dummy); - dummys_.emplace(partId, dummy); + dummies_.emplace(partId, dummy); } } @@ -287,9 +287,9 @@ class ListenerBasicTest : public ::testing::TestWithParam listenerHosts_; std::vector> stores_; std::vector> listeners_; - // dummys_ is a copy of Listener in listeners_, for convience to check + // dummies_ is a copy of Listener in listeners_, for convenience to check // consensus - std::unordered_map> dummys_; + std::unordered_map> dummies_; }; class ListenerAdvanceTest : public ListenerBasicTest { @@ -338,7 +338,7 @@ TEST_P(ListenerBasicTest, SimpleTest) { LOG(INFO) << "Check listener's data"; for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(100, data.size()); for (int32_t i = 0; i < static_cast(data.size()); i++) { @@ -367,7 +367,7 @@ TEST_P(ListenerBasicTest, TransLeaderTest) { baton.wait(); } - LOG(INFO) << "Trasfer all part leader to first replica"; + LOG(INFO) << "Transfer all part leader to first replica"; auto targetAddr = NebulaStore::getRaftAddr(peers_[0]); for (int32_t partId = 1; partId <= partCount_; partId++) { folly::Baton baton; @@ -408,7 +408,7 @@ TEST_P(ListenerBasicTest, TransLeaderTest) { LOG(INFO) << "Check listener's data"; for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(200, data.size()); for (int32_t i = 0; i < static_cast(data.size()); i++) { @@ -433,7 +433,7 @@ TEST_P(ListenerBasicTest, CommitSnapshotTest) { size += 
kvStr.size(); rows.emplace_back(kvStr); } - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; auto ret = dummy->commitSnapshot(rows, 100, 1, true); CHECK_EQ(ret.first, 100); CHECK_EQ(ret.second, size); @@ -441,7 +441,7 @@ TEST_P(ListenerBasicTest, CommitSnapshotTest) { LOG(INFO) << "Check listener's data"; for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(100, data.size()); for (int32_t i = 0; i < static_cast(data.size()); i++) { @@ -477,29 +477,29 @@ TEST_P(ListenerBasicTest, ListenerResetByWalTest) { sleep(FLAGS_raft_heartbeat_interval_secs + 3); for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(100000, data.size()); } for (int32_t partId = 1; partId <= partCount_; partId++) { - dummys_[partId]->resetListener(); - CHECK_EQ(0, dummys_[partId]->data().size()); - CHECK_EQ(0, dummys_[partId]->getApplyId()); + dummies_[partId]->resetListener(); + CHECK_EQ(0, dummies_[partId]->data().size()); + CHECK_EQ(0, dummies_[partId]->getApplyId()); } sleep(FLAGS_raft_heartbeat_interval_secs + 3); for (int32_t partId = 1; partId <= partCount_; partId++) { while (true) { - if (dummys_[partId]->pursueLeaderDone()) { + if (dummies_[partId]->pursueLeaderDone()) { break; } } } for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(100000, data.size()); } @@ -529,7 +529,7 @@ TEST_P(ListenerAdvanceTest, ListenerResetBySnapshotTest) { sleep(2 * FLAGS_raft_heartbeat_interval_secs); for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(10000, data.size()); } @@ -550,9 +550,9 @@ TEST_P(ListenerAdvanceTest, 
ListenerResetBySnapshotTest) { } for (int32_t partId = 1; partId <= partCount_; partId++) { - dummys_[partId]->resetListener(); - CHECK_EQ(0, dummys_[partId]->getApplyId()); - auto termAndId = dummys_[partId]->committedId(); + dummies_[partId]->resetListener(); + CHECK_EQ(0, dummies_[partId]->getApplyId()); + auto termAndId = dummies_[partId]->committedId(); CHECK_EQ(0, termAndId.first); CHECK_EQ(0, termAndId.second); } @@ -563,10 +563,10 @@ TEST_P(ListenerAdvanceTest, ListenerResetBySnapshotTest) { for (int32_t partId = 1; partId <= partCount_; partId++) { auto retry = 0; while (retry++ < 6) { - auto result = dummys_[partId]->committedSnapshot(); + auto result = dummies_[partId]->committedSnapshot(); if (result.first >= 10000) { partResult.emplace_back(true); - ASSERT_EQ(10000, dummys_[partId]->data().size()); + ASSERT_EQ(10000, dummies_[partId]->data().size()); break; } usleep(1000); @@ -579,8 +579,9 @@ TEST_P(ListenerAdvanceTest, ListenerResetBySnapshotTest) { TEST_P(ListenerSnapshotTest, SnapshotRateLimitTest) { for (int32_t partId = 1; partId <= partCount_; partId++) { // Write 10000 kvs in a part, key size is sizeof(partId) + vId + tagId = 4 + 8 + 4 = 16, - // value size is 24, so total size of a kv is 40. The snapshot size of a parition will be around - // 400Kb, and the rate limit is set to 40Kb, so snapshot will be sent at least 10 seconds. + // value size is 24, so total size of a kv is 40. The snapshot size of a partition will be + // around 400Kb, and the rate limit is set to 40Kb, so snapshot will be sent at least 10 + // seconds. 
for (int32_t i = 0; i < 10; i++) { std::vector data; for (int32_t j = 0; j < 1000; j++) { @@ -603,7 +604,7 @@ TEST_P(ListenerSnapshotTest, SnapshotRateLimitTest) { sleep(2 * FLAGS_raft_heartbeat_interval_secs); for (int32_t partId = 1; partId <= partCount_; partId++) { - auto dummy = dummys_[partId]; + auto dummy = dummies_[partId]; const auto& data = dummy->data(); CHECK_EQ(10000, data.size()); } @@ -624,27 +625,27 @@ TEST_P(ListenerSnapshotTest, SnapshotRateLimitTest) { } for (int32_t partId = 1; partId <= partCount_; partId++) { - dummys_[partId]->resetListener(); - CHECK_EQ(0, dummys_[partId]->getApplyId()); - auto termAndId = dummys_[partId]->committedId(); + dummies_[partId]->resetListener(); + CHECK_EQ(0, dummies_[partId]->getApplyId()); + auto termAndId = dummies_[partId]->committedId(); CHECK_EQ(0, termAndId.first); CHECK_EQ(0, termAndId.second); } // listener will try to pull snapshot for now. Since we have limit the snapshot send rate to 40Kb // in 1 second and batch size will be 10Kb, it would take at least 10 second to finish. 
Besides, - // there be at least 40 batchs + // there be at least 40 batches auto startTime = time::WallClock::fastNowInSec(); while (true) { std::vector partResult; for (int32_t partId = 1; partId <= partCount_; partId++) { - auto result = dummys_[partId]->committedSnapshot(); + auto result = dummies_[partId]->committedSnapshot(); if (result.first >= 10000) { partResult.emplace_back(true); - ASSERT_EQ(10000, dummys_[partId]->data().size()); + ASSERT_EQ(10000, dummies_[partId]->data().size()); ASSERT_GE(time::WallClock::fastNowInSec() - startTime, 10); - ASSERT_GE(dummys_[partId]->snapshotBatchCount(), 40); + ASSERT_GE(dummies_[partId]->snapshotBatchCount(), 40); } } if (static_cast(partResult.size()) == partCount_) { diff --git a/src/kvstore/test/NebulaStoreTest.cpp b/src/kvstore/test/NebulaStoreTest.cpp index ec8a4e56a91..f189b66449e 100644 --- a/src/kvstore/test/NebulaStoreTest.cpp +++ b/src/kvstore/test/NebulaStoreTest.cpp @@ -436,7 +436,7 @@ TEST(NebulaStoreTest, TransLeaderTest) { }; LOG(INFO) << "Transfer leader to first copy"; - // all parttition tranfer leaders to first replica + // all partition transfer leaders to first replica GraphSpaceID spaceId = 0; for (int i = 0; i < 3; i++) { PartitionID partId = i; diff --git a/src/kvstore/test/RateLimiterTest.cpp b/src/kvstore/test/RateLimiterTest.cpp index 2c99d844462..6961a4cf1bb 100644 --- a/src/kvstore/test/RateLimiterTest.cpp +++ b/src/kvstore/test/RateLimiterTest.cpp @@ -15,7 +15,7 @@ DECLARE_uint32(snapshot_part_rate_limit); namespace nebula { namespace kvstore { -TEST(RateLimter, ConsumeLessEqualThanBurst) { +TEST(RateLimiter, ConsumeLessEqualThanBurst) { RateLimiter limiter; auto now = time::WallClock::fastNowInSec(); int64_t count = 0; @@ -27,7 +27,7 @@ TEST(RateLimter, ConsumeLessEqualThanBurst) { EXPECT_GE(time::WallClock::fastNowInSec() - now, 5); } -TEST(RateLimter, ConsumeGreaterThanBurst) { +TEST(RateLimiter, ConsumeGreaterThanBurst) { RateLimiter limiter; auto now = 
time::WallClock::fastNowInSec(); int64_t count = 0; @@ -40,7 +40,7 @@ TEST(RateLimter, ConsumeGreaterThanBurst) { EXPECT_GE(time::WallClock::fastNowInSec() - now, 5); } -TEST(RateLimter, RateLessThanBurst) { +TEST(RateLimiter, RateLessThanBurst) { RateLimiter limiter; auto now = time::WallClock::fastNowInSec(); int64_t count = 0; diff --git a/src/kvstore/test/RocksEngineTest.cpp b/src/kvstore/test/RocksEngineTest.cpp index a09264429d1..a12885bbdb2 100644 --- a/src/kvstore/test/RocksEngineTest.cpp +++ b/src/kvstore/test/RocksEngineTest.cpp @@ -206,13 +206,13 @@ TEST_P(RocksEngineTest, IngestTest) { rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options); fs::TempDir rootPath("/tmp/rocksdb_engine_IngestTest.XXXXXX"); auto file = folly::stringPrintf("%s/%s", rootPath.path(), "data.sst"); - auto stauts = writer.Open(file); - ASSERT_TRUE(stauts.ok()); + auto status = writer.Open(file); + ASSERT_TRUE(status.ok()); - stauts = writer.Put("key", "value"); - ASSERT_TRUE(stauts.ok()); - stauts = writer.Put("key_empty", ""); - ASSERT_TRUE(stauts.ok()); + status = writer.Put("key", "value"); + ASSERT_TRUE(status.ok()); + status = writer.Put("key_empty", ""); + ASSERT_TRUE(status.ok()); writer.Finish(); auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); @@ -300,7 +300,7 @@ TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); PartitionID partId = 1; VertexID vId = "vertex"; - VertexID notExisted = "notexist"; + VertexID nonexistent = "notExist"; auto writeVertex = [&](TagID tagId) { std::vector data; @@ -344,7 +344,7 @@ TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { if (FLAGS_enable_rocksdb_prefix_filtering) { scanVertex(vId); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); - scanVertex(notExisted); + scanVertex(nonexistent); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); } @@ -361,7 +361,7 @@ 
TEST_P(RocksEngineTest, VertexWholeKeyBloomFilterTest) { scanVertex(vId); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); // read not exists data, prefix key bloom filter will be useful - scanVertex(notExisted); + scanVertex(nonexistent); EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); } @@ -377,7 +377,7 @@ TEST_P(RocksEngineTest, EdgeWholeKeyBloomFilterTest) { auto engine = std::make_unique(0, kDefaultVIdLen, rootPath.path()); PartitionID partId = 1; VertexID vId = "vertex"; - VertexID notExisted = "notexist"; + VertexID nonexistent = "notExist"; auto writeEdge = [&](EdgeType edgeType) { std::vector data; @@ -421,7 +421,7 @@ TEST_P(RocksEngineTest, EdgeWholeKeyBloomFilterTest) { if (FLAGS_enable_rocksdb_prefix_filtering) { scanEdge(vId); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); - scanEdge(notExisted); + scanEdge(nonexistent); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); } @@ -438,7 +438,7 @@ TEST_P(RocksEngineTest, EdgeWholeKeyBloomFilterTest) { scanEdge(vId); EXPECT_EQ(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); // read not exists data, prefix key bloom filter will be useful - scanEdge(notExisted); + scanEdge(nonexistent); EXPECT_GT(statistics->getTickerCount(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL), 0); } diff --git a/src/kvstore/wal/AtomicLogBuffer.h b/src/kvstore/wal/AtomicLogBuffer.h index b6023266e21..46477cf2c49 100644 --- a/src/kvstore/wal/AtomicLogBuffer.h +++ b/src/kvstore/wal/AtomicLogBuffer.h @@ -230,7 +230,7 @@ class AtomicLogBuffer : public std::enable_shared_from_this { if (size_ + recSize > capacity_) { auto* tail = tail_.load(std::memory_order_relaxed); // todo(doodle): there is a potential problem is that: since Node::isFull - // is judeged by log count, we can only add new node when previous node + // is judged by log count, we can only add 
new node when previous node // has enough logs. So when tail is equal to head, we need to wait tail is // full, after head moves forward, at then tail can be marked as deleted. // So the log buffer would takes up more memory than its capacity. Since diff --git a/src/kvstore/wal/FileBasedWal.cpp b/src/kvstore/wal/FileBasedWal.cpp index caecf65de55..e55f3e72bbc 100644 --- a/src/kvstore/wal/FileBasedWal.cpp +++ b/src/kvstore/wal/FileBasedWal.cpp @@ -226,7 +226,7 @@ void FileBasedWal::scanAllWalFiles() { if (!walFiles_.empty()) { auto it = walFiles_.rbegin(); - // Try to scan last wal, if it is invalid or empty, scan the privous one + // Try to scan last wal, if it is invalid or empty, scan the previous one scanLastWal(it->second, it->second->firstId()); if (it->second->lastId() <= 0) { unlink(it->second->path()); @@ -413,7 +413,7 @@ void FileBasedWal::scanLastWal(WalFileInfoPtr info, LogID firstId) { } if (head != foot) { - LOG(ERROR) << "Message size doen't match: " << head << " != " << foot; + LOG(ERROR) << "Message size doesn't match: " << head << " != " << foot; break; } @@ -645,7 +645,7 @@ void FileBasedWal::cleanWAL() { return; } auto now = time::WallClock::fastNowInSec(); - // In theory we only need to keep the latest wal file because it is beging + // In theory we only need to keep the latest wal file because it is being // written now. However, sometimes will trigger raft snapshot even only a // small amount of logs is missing, especially when we reboot all storage, so // se keep one more wal. 
diff --git a/src/kvstore/wal/test/InMemoryLogBufferList.h b/src/kvstore/wal/test/InMemoryLogBufferList.h index 9fbd5aca0cd..12c2359e137 100644 --- a/src/kvstore/wal/test/InMemoryLogBufferList.h +++ b/src/kvstore/wal/test/InMemoryLogBufferList.h @@ -34,7 +34,7 @@ class InMemoryBufferList : public std::enable_shared_from_thisfirstLogId(); } if (firstIdInBuffer_ <= currId_) { - // Go no futher + // Go no further currIdx_ = currId_ - firstIdInBuffer_; nextFirstId_ = getFirstIdInNextBuffer(); return false; diff --git a/src/kvstore/wal/test/LogBufferBenchmark.cpp b/src/kvstore/wal/test/LogBufferBenchmark.cpp index 83cb7a5c27e..b1a6c9bbe7d 100644 --- a/src/kvstore/wal/test/LogBufferBenchmark.cpp +++ b/src/kvstore/wal/test/LogBufferBenchmark.cpp @@ -33,7 +33,7 @@ void prepareData(std::shared_ptr logBuffer, int32_t len, size_t } /************************* - * Begining of benchmarks + * Beginning of benchmarks ************************/ void runInMemoryLogBufferWriteTest(size_t iters, int32_t len) { @@ -379,7 +379,7 @@ int main(int argc, char** argv) { } /* Intel(R) Xeon(R) CPU E5-2690 v2 @ 3.00GHz --O2 kMaxLenght=64 write test +-O2 kMaxLength=64 write test ============================================================================ LogBufferBenchmark.cpprelative time/iter iters/s ============================================================================ diff --git a/src/meta/KVBasedClusterIdMan.h b/src/meta/KVBasedClusterIdMan.h index 60f5be8f9ec..b791d2ae971 100644 --- a/src/meta/KVBasedClusterIdMan.h +++ b/src/meta/KVBasedClusterIdMan.h @@ -51,7 +51,7 @@ class ClusterIdMan { ::close(fd); return false; } - LOG(INFO) << "Persiste clusterId " << clusterId << " succeeded!"; + LOG(INFO) << "Persist clusterId " << clusterId << " succeeded!"; ::close(fd); return true; } diff --git a/src/meta/MetaServiceUtils.cpp b/src/meta/MetaServiceUtils.cpp index 113790af208..96a5bb857a9 100644 --- a/src/meta/MetaServiceUtils.cpp +++ b/src/meta/MetaServiceUtils.cpp @@ -234,7 +234,7 @@ 
ErrorOr> MetaServiceUtils::bac if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { return result; } - LOG(INFO) << table.first << " table backup successed"; + LOG(INFO) << table.first << " table backup succeeded"; } if (spaceNames == nullptr) { @@ -248,7 +248,7 @@ ErrorOr> MetaServiceUtils::bac if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { return result; } - LOG(INFO) << table.first << " table backup successed"; + LOG(INFO) << table.first << " table backup succeeded"; } } @@ -262,7 +262,7 @@ ErrorOr> MetaServiceUtils::bac if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { return result; } - LOG(INFO) << table.first << " table backup successed"; + LOG(INFO) << table.first << " table backup succeeded"; } // The mapping of space name and space id needs to be handled separately. diff --git a/src/meta/processors/BaseProcessor-inl.h b/src/meta/processors/BaseProcessor-inl.h index b46862753c3..5b09579bd73 100644 --- a/src/meta/processors/BaseProcessor-inl.h +++ b/src/meta/processors/BaseProcessor-inl.h @@ -170,7 +170,7 @@ ErrorOr BaseProcessor::autoIncrementId() } template -ErrorOr BaseProcessor::getAvailableGolbalId() { +ErrorOr BaseProcessor::getAvailableGlobalId() { // A read lock has been added before call static const std::string kIdKey = "__id__"; int32_t id; @@ -206,7 +206,7 @@ ErrorOr BaseProcessor::autoIncrementIdIn // In order to be compatible with the existing old schema, and simple to implement, // when the local_id record does not exist in space, directly use the smallest // id available globally. 
- auto globalIdRet = getAvailableGolbalId(); + auto globalIdRet = getAvailableGlobalId(); if (!nebula::ok(globalIdRet)) { return nebula::error(globalIdRet); } diff --git a/src/meta/processors/BaseProcessor.h b/src/meta/processors/BaseProcessor.h index 37e8fbab200..537a583d448 100644 --- a/src/meta/processors/BaseProcessor.h +++ b/src/meta/processors/BaseProcessor.h @@ -162,7 +162,7 @@ class BaseProcessor { /** * Get the current available global id **/ - ErrorOr getAvailableGolbalId(); + ErrorOr getAvailableGlobalId(); /** * Get one auto-increment Id in spaceId. diff --git a/src/meta/processors/admin/AdminClient.cpp b/src/meta/processors/admin/AdminClient.cpp index c8d6ce43ace..61fe5cf001e 100644 --- a/src/meta/processors/admin/AdminClient.cpp +++ b/src/meta/processors/admin/AdminClient.cpp @@ -710,7 +710,7 @@ folly::Future AdminClient::addTask(cpp2::AdminCmd cmd, if (targetHost.empty()) { auto activeHostsRet = ActiveHostsMan::getActiveAdminHosts(kv_); if (!nebula::ok(activeHostsRet)) { - pro.setValue(Status::Error("Get actice hosts failed")); + pro.setValue(Status::Error("Get active hosts failed")); return f; } else { hosts = nebula::value(activeHostsRet); @@ -728,7 +728,7 @@ folly::Future AdminClient::addTask(cpp2::AdminCmd cmd, storage::cpp2::TaskPara para; para.set_space_id(spaceId); para.set_parts(std::move(parts)); - para.set_task_specfic_paras(taskSpecficParas); + para.set_task_specific_paras(taskSpecficParas); req.set_para(std::move(para)); std::function respGen = @@ -759,7 +759,7 @@ folly::Future AdminClient::stopTask(const std::vector& target, if (target.empty()) { auto activeHostsRet = ActiveHostsMan::getActiveAdminHosts(kv_); if (!nebula::ok(activeHostsRet)) { - pro.setValue(Status::Error("Get actice hosts failed")); + pro.setValue(Status::Error("Get active hosts failed")); return f; } else { hosts = nebula::value(activeHostsRet); diff --git a/src/meta/processors/admin/Balancer.cpp b/src/meta/processors/admin/Balancer.cpp new file mode 100644 index 
00000000000..8cdad02b45a --- /dev/null +++ b/src/meta/processors/admin/Balancer.cpp @@ -0,0 +1,1232 @@ +/* Copyright (c) 2019 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. + */ + +#include "meta/processors/admin/Balancer.h" + +#include + +#include +#include + +#include "common/network/NetworkUtils.h" +#include "common/utils/MetaKeyUtils.h" +#include "kvstore/NebulaStore.h" +#include "meta/ActiveHostsMan.h" +#include "meta/common/MetaCommon.h" +#include "meta/processors/Common.h" + +DEFINE_double(leader_balance_deviation, + 0.05, + "after leader balance, leader count should in range " + "[avg * (1 - deviation), avg * (1 + deviation)]"); + +namespace nebula { +namespace meta { + +ErrorOr Balancer::balance(std::vector&& lostHosts) { + std::lock_guard lg(lock_); + if (!running_) { + auto retCode = recovery(); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Recovery balancer failed!"; + finish(); + return retCode; + } + if (plan_ == nullptr) { + LOG(INFO) << "There is no corrupted plan need to recovery, so create a new one"; + retCode = buildBalancePlan(std::move(lostHosts)); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Create balance plan failed"; + finish(); + return retCode; + } + } + LOG(INFO) << "Start to invoke balance plan " << plan_->id(); + executor_->add(std::bind(&BalancePlan::invoke, plan_.get())); + running_ = true; + return plan_->id(); + } + CHECK(!!plan_); + LOG(INFO) << "Balance plan " << plan_->id() << " is still running"; + return plan_->id(); +} + +ErrorOr Balancer::show(BalanceID id) const { + std::lock_guard lg(lock_); + if (plan_ != nullptr && plan_->id() == id) { + return *plan_; + } + + if (kv_) { + BalancePlan plan(id, kv_, client_); + auto retCode = plan.recovery(false); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get balance plan failed, id " << id; + return retCode; + } + return plan; + } + return 
nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; +} + +ErrorOr Balancer::stop() { + std::lock_guard lg(lock_); + if (!running_) { + return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; + } + CHECK(!!plan_); + plan_->stop(); + LOG(INFO) << "Stop balance plan " << plan_->id(); + return plan_->id(); +} + +ErrorOr Balancer::cleanLastInValidPlan() { + std::lock_guard lg(lock_); + auto* store = static_cast(kv_); + if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { + return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; + } + if (running_) { + return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; + } + const auto& prefix = MetaKeyUtils::balancePlanPrefix(); + std::unique_ptr iter; + auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + // There should be at most one invalid plan, and it must be the latest one + if (iter->valid()) { + auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); + if (status == BalanceStatus::FAILED) { + auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); + folly::Baton baton; + auto result = nebula::cpp2::ErrorCode::SUCCEEDED; + // Only remove the plan will be enough + kv_->asyncMultiRemove(kDefaultSpaceId, + kDefaultPartId, + {iter->key().str()}, + [&baton, &result](nebula::cpp2::ErrorCode code) { + result = code; + baton.post(); + }); + baton.wait(); + if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { + return result; + } + return balanceId; + } + } + return nebula::cpp2::ErrorCode::E_NO_INVALID_BALANCE_PLAN; +} + +nebula::cpp2::ErrorCode Balancer::recovery() { + CHECK(!plan_) << "plan should be nullptr now"; + if (kv_) { + auto* store = static_cast(kv_); + if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { + // We need to check whether is leader or not, otherwise we would failed to + // persist state of BalancePlan and BalanceTask, so we 
just reject request + // if not leader. + return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; + } + const auto& prefix = MetaKeyUtils::balancePlanPrefix(); + std::unique_ptr iter; + auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + std::vector corruptedPlans; + // The balance plan is stored with balance id desc order, there should be at + // most one failed or in_progress plan, and it must be the latest one + if (iter->valid()) { + auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); + if (status == BalanceStatus::IN_PROGRESS || status == BalanceStatus::FAILED) { + auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); + corruptedPlans.emplace_back(balanceId); + } + } + if (corruptedPlans.empty()) { + LOG(INFO) << "No corrupted plan need to recovery!"; + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + + CHECK_EQ(1, corruptedPlans.size()); + plan_ = std::make_unique(corruptedPlans[0], kv_, client_); + plan_->onFinished_ = [this]() { + auto self = plan_; + { + std::lock_guard lg(lock_); + if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) != + nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; + } + finish(); + } + }; + auto recRet = plan_->recovery(); + if (recRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't recovery plan " << corruptedPlans[0]; + return recRet; + } + } + // save the balance plan again because FAILED tasks would be marked as + // IN_PROGRESS again + return plan_->saveInStore(); +} + +nebula::cpp2::ErrorCode Balancer::getAllSpaces( + std::vector>& spaces) { + // Get all spaces + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaKeyUtils::spacePrefix(); + std::unique_ptr iter; + auto retCode = 
kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get all spaces failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + while (iter->valid()) { + auto spaceId = MetaKeyUtils::spaceId(iter->key()); + auto properties = MetaKeyUtils::parseSpace(iter->val()); + bool zoned = properties.group_name_ref().has_value(); + spaces.emplace_back(spaceId, *properties.replica_factor_ref(), zoned); + iter->next(); + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +nebula::cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { + if (plan_ != nullptr) { + LOG(ERROR) << "Balance plan should be nullptr now"; + return nebula::cpp2::ErrorCode::E_BALANCED; + } + + std::vector> spaces; + auto spacesRet = getAllSpaces(spaces); + if (spacesRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't get all spaces"; + return spacesRet; + } + + plan_ = std::make_unique(time::WallClock::fastNowInSec(), kv_, client_); + for (const auto& spaceInfo : spaces) { + auto spaceId = std::get<0>(spaceInfo); + auto spaceReplica = std::get<1>(spaceInfo); + auto dependentOnGroup = std::get<2>(spaceInfo); + LOG(INFO) << "Balance Space " << spaceId; + auto taskRet = genTasks(spaceId, spaceReplica, dependentOnGroup, std::move(lostHosts)); + if (!ok(taskRet)) { + LOG(ERROR) << "Generate tasks on space " << std::get<0>(spaceInfo) << " failed"; + return error(taskRet); + } + + auto tasks = std::move(value(taskRet)); + for (auto& task : tasks) { + plan_->addTask(std::move(task)); + } + } + + plan_->onFinished_ = [this]() { + auto self = plan_; + { + std::lock_guard lg(lock_); + if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) != + nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; + } + finish(); + } + }; + if (plan_->tasks_.empty()) { + return nebula::cpp2::ErrorCode::E_BALANCED; + } + 
return plan_->saveInStore(); +} + +ErrorOr> Balancer::genTasks( + GraphSpaceID spaceId, + int32_t spaceReplica, + bool dependentOnGroup, + std::vector&& lostHosts) { + HostParts hostParts; + int32_t totalParts = 0; + // hostParts is current part allocation map + auto result = getHostParts(spaceId, dependentOnGroup, hostParts, totalParts); + if (!nebula::ok(result)) { + return nebula::error(result); + } + + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || hostParts.empty()) { + LOG(ERROR) << "Invalid space " << spaceId; + return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; + } + + auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); + if (!nebula::ok(fetchHostPartsRet)) { + LOG(ERROR) << "Fetch hosts and parts failed"; + return nebula::error(fetchHostPartsRet); + } + + auto hostPartsRet = nebula::value(fetchHostPartsRet); + auto confirmedHostParts = hostPartsRet.first; + auto activeHosts = hostPartsRet.second; + LOG(INFO) << "Now, try to balance the confirmedHostParts"; + + // We have two parts need to balance, the first one is parts on lost hosts and + // deleted hosts The seconds one is parts on unbalanced host in + // confirmedHostParts. + std::vector tasks; + // 1. 
Iterate through all hosts that would not be included in + // confirmedHostParts, + // move all parts in them to host with minimum part in confirmedHostParts + for (auto& lostHost : lostHosts) { + auto& lostParts = hostParts[lostHost]; + for (auto& partId : lostParts) { + LOG(INFO) << "Try balance part " << partId << " for lost host " << lostHost; + // check whether any peers which is alive + auto alive = checkReplica(hostParts, activeHosts, spaceReplica, partId); + if (!alive.ok()) { + LOG(ERROR) << "Check Replica failed: " << alive << " Part: " << partId; + return nebula::cpp2::ErrorCode::E_NO_VALID_HOST; + } + + auto retCode = + transferLostHost(tasks, confirmedHostParts, lostHost, spaceId, partId, dependentOnGroup); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; + return retCode; + } + } + } + + // 2. Make all hosts in confirmedHostParts balanced + if (balanceParts(plan_->id_, spaceId, confirmedHostParts, totalParts, tasks, dependentOnGroup)) { + return tasks; + } else { + return nebula::cpp2::ErrorCode::E_BAD_BALANCE_PLAN; + } +} + +nebula::cpp2::ErrorCode Balancer::transferLostHost(std::vector& tasks, + HostParts& confirmedHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup) { + // find a host with minimum parts which doesn't have this part + ErrorOr result; + if (dependentOnGroup) { + result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); + } else { + result = hostWithMinimalParts(confirmedHostParts, partId); + } + + if (!nebula::ok(result)) { + LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; + return nebula::error(result); + } + const auto& targetHost = nebula::value(result); + confirmedHostParts[targetHost].emplace_back(partId); + tasks.emplace_back(plan_->id_, spaceId, partId, source, targetHost, kv_, client_); + zoneParts_[targetHost].second.emplace_back(partId); + auto zoneIt = + 
std::find(zoneParts_[source].second.begin(), zoneParts_[source].second.end(), partId); + if (zoneIt == zoneParts_[source].second.end()) { + LOG(ERROR) << "part not find " << partId << " at " << source; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +ErrorOr>> +Balancer::fetchHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + const HostParts& hostParts, + std::vector& lostHosts) { + ErrorOr> activeHostsRet; + if (dependentOnGroup) { + activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); + } else { + activeHostsRet = ActiveHostsMan::getActiveHosts(kv_); + } + + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + + std::vector expand; + auto activeHosts = nebula::value(activeHostsRet); + calDiff(hostParts, activeHosts, expand, lostHosts); + // confirmedHostParts is new part allocation map after balance, it would + // include newlyAdded and exclude lostHosts + HostParts confirmedHostParts(hostParts); + for (const auto& h : expand) { + LOG(INFO) << "Found new host " << h; + confirmedHostParts.emplace(h, std::vector()); + } + for (const auto& h : lostHosts) { + LOG(INFO) << "Lost host " << h; + confirmedHostParts.erase(h); + } + return std::make_pair(confirmedHostParts, activeHosts); +} + +bool Balancer::balanceParts(BalanceID balanceId, + GraphSpaceID spaceId, + HostParts& confirmedHostParts, + int32_t totalParts, + std::vector& tasks, + bool dependentOnGroup) { + auto avgLoad = static_cast(totalParts) / confirmedHostParts.size(); + VLOG(3) << "The expect avg load is " << avgLoad; + int32_t minLoad = std::floor(avgLoad); + int32_t maxLoad = std::ceil(avgLoad); + VLOG(3) << "The min load is " << minLoad << " max load is " << maxLoad; + + auto sortedHosts = sortedHostsByParts(confirmedHostParts); + if (sortedHosts.empty()) { + LOG(ERROR) << "Host is empty"; + return false; + } + + auto maxPartsHost = sortedHosts.back(); + auto minPartsHost = sortedHosts.front(); + auto& sourceHost = maxPartsHost.first; + auto& 
targetHost = minPartsHost.first; + if (innerBalance_) { + LOG(INFO) << "maxPartsHost.first " << maxPartsHost.first << " minPartsHost.first " + << minPartsHost.first; + while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { + sortedHosts.pop_back(); + maxPartsHost = sortedHosts.back(); + } + + auto& source = maxPartsHost.first; + auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + auto& zoneName = iter->second.first; + int32_t hostsSize = zoneHosts_[zoneName].size(); + int32_t totalPartsZone = 0; + for (auto& host : zoneHosts_[zoneName]) { + auto it = confirmedHostParts.find(host); + if (it == confirmedHostParts.end()) { + LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; + continue; + } + totalPartsZone += it->second.size(); + } + + avgLoad = static_cast(totalPartsZone) / hostsSize; + minLoad = std::floor(avgLoad); + maxLoad = std::ceil(avgLoad); + LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone + << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad + << " The min load is " << minLoad << " max load is " << maxLoad; + } + + while (maxPartsHost.second > maxLoad || minPartsHost.second < minLoad) { + auto& partsFrom = confirmedHostParts[maxPartsHost.first]; + auto& partsTo = confirmedHostParts[minPartsHost.first]; + std::sort(partsFrom.begin(), partsFrom.end()); + std::sort(partsTo.begin(), partsTo.end()); + + LOG(INFO) << maxPartsHost.first << ":" << partsFrom.size() << " -> " << minPartsHost.first + << ":" << partsTo.size(); + std::vector diff; + std::set_difference(partsFrom.begin(), + partsFrom.end(), + partsTo.begin(), + partsTo.end(), + std::inserter(diff, diff.begin())); + bool noAction = true; + for (auto& partId : diff) { + LOG(INFO) << "partsFrom size " << partsFrom.size() << " partsTo size " << partsTo.size() + << " minLoad " << minLoad << " maxLoad " << maxLoad; + if (partsFrom.size() == 
partsTo.size() + 1 || + partsFrom.size() == static_cast(minLoad) || + partsTo.size() == static_cast(maxLoad)) { + VLOG(3) << "No need to move any parts from " << maxPartsHost.first << " to " + << minPartsHost.first; + break; + } + + LOG(INFO) << "[space:" << spaceId << ", part:" << partId << "] " << maxPartsHost.first << "->" + << minPartsHost.first; + auto it = std::find(partsFrom.begin(), partsFrom.end(), partId); + if (it == partsFrom.end()) { + LOG(ERROR) << "Part " << partId << " not found in partsFrom"; + return false; + } + + if (std::find(partsTo.begin(), partsTo.end(), partId) != partsTo.end()) { + LOG(ERROR) << "Part " << partId << " already existed in partsTo"; + return false; + } + + if (dependentOnGroup) { + if (!checkZoneLegal(sourceHost, targetHost)) { + LOG(INFO) << "sourceHost " << sourceHost << " targetHost " << targetHost + << " not same zone"; + + auto& parts = relatedParts_[targetHost]; + auto minIt = std::find(parts.begin(), parts.end(), partId); + if (minIt != parts.end()) { + LOG(INFO) << "Part " << partId << " have existed"; + continue; + } + } + + auto& sourceNoneName = zoneParts_[sourceHost].first; + auto sourceHosts = zoneHosts_.find(sourceNoneName); + for (auto& sh : sourceHosts->second) { + auto& parts = relatedParts_[sh]; + auto maxIt = std::find(parts.begin(), parts.end(), partId); + if (maxIt == parts.end()) { + LOG(INFO) << "Part " << partId << " not found on " << sh; + continue; + } + parts.erase(maxIt); + } + + auto& targetNoneName = zoneParts_[targetHost].first; + auto targetHosts = zoneHosts_.find(targetNoneName); + for (auto& th : targetHosts->second) { + relatedParts_[th].emplace_back(partId); + } + } + + partsFrom.erase(it); + partsTo.emplace_back(partId); + tasks.emplace_back( + balanceId, spaceId, partId, maxPartsHost.first, minPartsHost.first, kv_, client_); + noAction = false; + } + + if (noAction) { + LOG(INFO) << "Here is no action"; + break; + } + sortedHosts = sortedHostsByParts(confirmedHostParts); + maxPartsHost = 
sortedHosts.back(); + minPartsHost = sortedHosts.front(); + if (innerBalance_) { + while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { + sortedHosts.pop_back(); + maxPartsHost = sortedHosts.back(); + } + + auto& source = maxPartsHost.first; + auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + auto& zoneName = iter->second.first; + int32_t hostsSize = zoneHosts_[zoneName].size(); + int32_t totalPartsZone = 0; + for (auto& host : zoneHosts_[zoneName]) { + auto it = confirmedHostParts.find(host); + if (it == confirmedHostParts.end()) { + LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; + continue; + } + totalPartsZone += it->second.size(); + } + + avgLoad = static_cast(totalPartsZone) / hostsSize; + minLoad = std::floor(avgLoad); + maxLoad = std::ceil(avgLoad); + LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone + << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad + << " The min load is " << minLoad << " max load is " << maxLoad; + } + } + LOG(INFO) << "Balance tasks num: " << tasks.size(); + for (auto& task : tasks) { + LOG(INFO) << task.taskIdStr(); + } + + relatedParts_.clear(); + return true; +} + +ErrorOr Balancer::getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts) { + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaKeyUtils::partPrefix(spaceId); + std::unique_ptr iter; + auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << " " + << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + while (iter->valid()) { + auto key = iter->key(); + PartitionID partId; + memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); + auto partHosts = 
MetaKeyUtils::parsePartVal(iter->val()); + for (auto& ph : partHosts) { + hostParts[ph].emplace_back(partId); + } + totalParts++; + iter->next(); + } + + LOG(INFO) << "Host size: " << hostParts.size(); + auto key = MetaKeyUtils::spaceKey(spaceId); + std::string value; + retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &value); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId + << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto properties = MetaKeyUtils::parseSpace(value); + if (totalParts != properties.get_partition_num()) { + LOG(ERROR) << "Partition number not equals"; + LOG(ERROR) << totalParts << " : " << properties.get_partition_num(); + return false; + } + + int32_t replica = properties.get_replica_factor(); + LOG(INFO) << "Replica " << replica; + if (dependentOnGroup && properties.group_name_ref().has_value()) { + auto groupName = *properties.group_name_ref(); + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + int32_t zoneSize = MetaKeyUtils::parseZoneNames(std::move(groupValue)).size(); + LOG(INFO) << "Zone Size " << zoneSize; + innerBalance_ = (replica == zoneSize); + + auto activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); + if (!nebula::ok(activeHostsRet)) { + return nebula::error(activeHostsRet); + } + + std::vector expand; + auto activeHosts = nebula::value(activeHostsRet); + std::vector lostHosts; + calDiff(hostParts, activeHosts, expand, lostHosts); + // confirmedHostParts is new part allocation map after balance, it would include newlyAdded + // and exclude lostHosts + HostParts confirmedHostParts(hostParts); + for (const auto& h : 
expand) { + LOG(INFO) << "Found new host " << h; + confirmedHostParts.emplace(h, std::vector()); + } + for (const auto& h : lostHosts) { + LOG(INFO) << "Lost host " << h; + confirmedHostParts.erase(h); + } + + auto zonePartsRet = assembleZoneParts(groupName, confirmedHostParts); + if (zonePartsRet != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; + return zonePartsRet; + } + } + + totalParts *= replica; + return true; +} + +nebula::cpp2::ErrorCode Balancer::assembleZoneParts(const std::string& groupName, + HostParts& hostParts) { + LOG(INFO) << "Balancer assembleZoneParts"; + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + // zoneHosts use to record this host belong to zone's hosts + std::unordered_map, std::vector> zoneHosts; + auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); + for (auto zoneName : zoneNames) { + LOG(INFO) << "Zone Name: " << zoneName; + auto zoneKey = MetaKeyUtils::zoneKey(zoneName); + std::string zoneValue; + retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get zone " << zoneName + << " failed: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); + for (const auto& host : hosts) { + LOG(INFO) << "Host for zone " << host; + auto pair = std::pair(std::move(host), zoneName); + auto& hs = zoneHosts[std::move(pair)]; + hs.insert(hs.end(), hosts.begin(), hosts.end()); + } + } + + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + auto host = it->first; + LOG(INFO) << "Host: " << 
host; + auto zoneIter = + std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { + return host == pair.first.first; + }); + + if (zoneIter == zoneHosts.end()) { + LOG(INFO) << it->first << " have lost"; + continue; + } + + auto& hosts = zoneIter->second; + auto name = zoneIter->first.second; + zoneHosts_[name] = hosts; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto partIter = hostParts.find(*hostIter); + LOG(INFO) << "Zone " << name << " have the host " << it->first; + if (partIter == hostParts.end()) { + zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); + } else { + zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); + } + } + } + + for (auto it = zoneHosts.begin(); it != zoneHosts.end(); it++) { + auto host = it->first.first; + auto& hosts = it->second; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto h = *hostIter; + auto iter = std::find_if(hostParts.begin(), hostParts.end(), [h](const auto& pair) -> bool { + return h == pair.first; + }); + + if (iter == hostParts.end()) { + continue; + } + + auto& parts = iter->second; + auto& hp = relatedParts_[host]; + hp.insert(hp.end(), parts.begin(), parts.end()); + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +void Balancer::calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& expand, + std::vector& lost) { + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + VLOG(1) << "Original Host " << it->first << ", parts " << it->second.size(); + if (std::find(activeHosts.begin(), activeHosts.end(), it->first) == activeHosts.end() && + std::find(lost.begin(), lost.end(), it->first) == lost.end()) { + lost.emplace_back(it->first); + } + } + for (auto& h : activeHosts) { + VLOG(1) << "Active host " << h; + if (hostParts.find(h) == hostParts.end()) { + expand.emplace_back(h); + } + } +} + +std::vector> Balancer::sortedHostsByParts(const HostParts& 
hostParts) { + std::vector> hosts; + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + LOG(INFO) << "Host " << it->first << " parts " << it->second.size(); + hosts.emplace_back(it->first, it->second.size()); + } + std::sort(hosts.begin(), hosts.end(), [](const auto& l, const auto& r) { + if (l.second != r.second) { + return l.second < r.second; + } else { + return l.first.host < r.first.host; + } + }); + return hosts; +} + +Status Balancer::checkReplica(const HostParts& hostParts, + const std::vector& activeHosts, + int32_t replica, + PartitionID partId) { + // check host hold the part and alive + auto checkPart = [&](const auto& entry) { + auto& host = entry.first; + auto& parts = entry.second; + return std::find(parts.begin(), parts.end(), partId) != parts.end() && + std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); + }; + auto aliveReplica = std::count_if(hostParts.begin(), hostParts.end(), checkPart); + if (aliveReplica >= replica / 2 + 1) { + return Status::OK(); + } + return Status::Error("Not enough alive host hold the part %d", partId); +} + +ErrorOr Balancer::hostWithMinimalParts( + const HostParts& hostParts, PartitionID partId) { + auto hosts = sortedHostsByParts(hostParts); + for (auto& h : hosts) { + auto it = hostParts.find(h.first); + if (it == hostParts.end()) { + LOG(ERROR) << "Host " << h.first << " not found"; + return nebula::cpp2::ErrorCode::E_NO_HOSTS; + } + + if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { + return h.first; + } + } + return nebula::cpp2::ErrorCode::E_NO_HOSTS; +} + +ErrorOr Balancer::hostWithMinimalPartsForZone( + const HostAddr& source, const HostParts& hostParts, PartitionID partId) { + auto hosts = sortedHostsByParts(hostParts); + for (auto& h : hosts) { + auto it = hostParts.find(h.first); + if (it == hostParts.end()) { + LOG(ERROR) << "Host " << h.first << " not found"; + return nebula::cpp2::ErrorCode::E_NO_HOSTS; + } + + LOG(INFO) << 
"source " << source << " h.first " << h.first; + if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { + return h.first; + } + } + return nebula::cpp2::ErrorCode::E_NO_HOSTS; +} + +nebula::cpp2::ErrorCode Balancer::leaderBalance() { + if (running_) { + LOG(INFO) << "Balance process still running"; + return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; + } + + folly::Promise promise; + auto future = promise.getFuture(); + // Space ID, Replica Factor and Dependent On Group + std::vector> spaces; + auto ret = getAllSpaces(spaces); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Can't get spaces"; + // TODO unify error code + if (ret != nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + ret = nebula::cpp2::ErrorCode::E_STORE_FAILURE; + } + return ret; + } + + bool expected = false; + if (inLeaderBalance_.compare_exchange_strong(expected, true)) { + hostLeaderMap_.reset(new HostLeaderMap); + auto status = client_->getLeaderDist(hostLeaderMap_.get()).get(); + if (!status.ok() || hostLeaderMap_->empty()) { + LOG(ERROR) << "Get leader distribution failed"; + inLeaderBalance_ = false; + return nebula::cpp2::ErrorCode::E_RPC_FAILURE; + } + + std::vector> futures; + for (const auto& spaceInfo : spaces) { + auto spaceId = std::get<0>(spaceInfo); + auto replicaFactor = std::get<1>(spaceInfo); + auto dependentOnGroup = std::get<2>(spaceInfo); + LeaderBalancePlan plan; + auto balanceResult = buildLeaderBalancePlan( + hostLeaderMap_.get(), spaceId, replicaFactor, dependentOnGroup, plan); + if (!nebula::ok(balanceResult) || !nebula::value(balanceResult)) { + LOG(ERROR) << "Building leader balance plan failed " + << "Space: " << spaceId; + continue; + } + simplifyLeaderBalancePlan(spaceId, plan); + for (const auto& task : plan) { + futures.emplace_back(client_->transLeader(std::get<0>(task), + std::get<1>(task), + std::move(std::get<2>(task)), + std::move(std::get<3>(task)))); + } + } + + int32_t failed = 0; + folly::collectAll(futures) + 
.via(executor_.get()) + .thenTry([&](const auto& result) { + auto tries = result.value(); + for (const auto& t : tries) { + if (!t.value().ok()) { + ++failed; + } + } + }) + .wait(); + + inLeaderBalance_ = false; + if (failed != 0) { + LOG(ERROR) << failed << " partition failed to transfer leader"; + } + return nebula::cpp2::ErrorCode::SUCCEEDED; + } + return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; +} + +ErrorOr Balancer::buildLeaderBalancePlan( + HostLeaderMap* hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation) { + PartAllocation peersMap; + HostParts leaderHostParts; + size_t leaderParts = 0; + // store peers of all partitions in peerMap + folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); + const auto& prefix = MetaKeyUtils::partPrefix(spaceId); + std::unique_ptr iter; + auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << static_cast(retCode); + return retCode; + } + + while (iter->valid()) { + auto key = iter->key(); + PartitionID partId; + memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); + auto peers = MetaKeyUtils::parsePartVal(iter->val()); + peersMap[partId] = std::move(peers); + ++leaderParts; + iter->next(); + } + + int32_t totalParts = 0; + HostParts allHostParts; + auto result = getHostParts(spaceId, dependentOnGroup, allHostParts, totalParts); + if (!nebula::ok(result)) { + return nebula::error(result); + } else { + auto retVal = nebula::value(result); + if (!retVal || totalParts == 0 || allHostParts.empty()) { + LOG(ERROR) << "Invalid space " << spaceId; + return false; + } + } + + std::unordered_set activeHosts; + for (const auto& host : *hostLeaderMap) { + // only balance leader between hosts which have valid partition + if (!allHostParts[host.first].empty()) { + 
activeHosts.emplace(host.first); + leaderHostParts[host.first] = (*hostLeaderMap)[host.first][spaceId]; + } + } + + if (activeHosts.empty()) { + LOG(ERROR) << "No active hosts"; + return false; + } + + if (dependentOnGroup) { + for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { + auto min = it->second.size() / replicaFactor; + VLOG(3) << "Host: " << it->first << " Bounds: " << min << " : " << min + 1; + hostBounds_[it->first] = std::make_pair(min, min + 1); + } + } else { + size_t activeSize = activeHosts.size(); + size_t globalAvg = leaderParts / activeSize; + size_t globalMin = globalAvg; + size_t globalMax = globalAvg; + if (leaderParts % activeSize != 0) { + globalMax += 1; + } + + if (useDeviation) { + globalMin = std::ceil(static_cast(leaderParts) / activeSize * + (1 - FLAGS_leader_balance_deviation)); + globalMax = std::floor(static_cast(leaderParts) / activeSize * + (1 + FLAGS_leader_balance_deviation)); + } + VLOG(3) << "Build leader balance plan, expected min load: " << globalMin + << ", max load: " << globalMax << " avg: " << globalAvg; + + for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { + hostBounds_[it->first] = std::make_pair(globalMin, globalMax); + } + } + + while (true) { + int32_t taskCount = 0; + bool hasUnbalancedHost = false; + for (const auto& hostEntry : leaderHostParts) { + auto host = hostEntry.first; + auto& hostMinLoad = hostBounds_[host].first; + auto& hostMaxLoad = hostBounds_[host].second; + int32_t partSize = hostEntry.second.size(); + if (hostMinLoad <= partSize && partSize <= hostMaxLoad) { + VLOG(3) << partSize << " is between min load " << hostMinLoad << " and max load " + << hostMaxLoad; + continue; + } + + hasUnbalancedHost = true; + if (partSize < hostMinLoad) { + // need to acquire leader from other hosts + LOG(INFO) << "Acquire leaders to host: " << host << " loading: " << partSize + << " min loading " << hostMinLoad; + taskCount += acquireLeaders( + allHostParts, leaderHostParts, 
peersMap, activeHosts, host, plan, spaceId); + } else { + // need to transfer leader to other hosts + LOG(INFO) << "Giveup leaders from host: " << host << " loading: " << partSize + << " max loading " << hostMaxLoad; + taskCount += giveupLeaders(leaderHostParts, peersMap, activeHosts, host, plan, spaceId); + } + } + + // If every host is balanced or no more task during this loop, then the plan + // is done + if (!hasUnbalancedHost || taskCount == 0) { + LOG(INFO) << "Not need balance"; + break; + } + } + return true; +} + +int32_t Balancer::acquireLeaders(HostParts& allHostParts, + HostParts& leaderHostParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& target, + LeaderBalancePlan& plan, + GraphSpaceID spaceId) { + // host will loop for the partition which is not leader, and try to acquire the + // leader + int32_t taskCount = 0; + std::vector diff; + std::set_difference(allHostParts[target].begin(), + allHostParts[target].end(), + leaderHostParts[target].begin(), + leaderHostParts[target].end(), + std::back_inserter(diff)); + auto& targetLeaders = leaderHostParts[target]; + size_t minLoad = hostBounds_[target].first; + for (const auto& partId : diff) { + VLOG(3) << "Try acquire leader for part " << partId; + // find the leader of partId + auto sources = peersMap[partId]; + for (const auto& source : sources) { + if (source == target || !activeHosts.count(source)) { + continue; + } + + // if peer is the leader of partId and can transfer, then transfer it to + // host + auto& sourceLeaders = leaderHostParts[source]; + VLOG(3) << "Check peer: " << source << " min load: " << minLoad + << " peerLeaders size: " << sourceLeaders.size(); + auto it = std::find(sourceLeaders.begin(), sourceLeaders.end(), partId); + if (it != sourceLeaders.end() && minLoad < sourceLeaders.size()) { + sourceLeaders.erase(it); + targetLeaders.emplace_back(partId); + plan.emplace_back(spaceId, partId, source, target); + LOG(INFO) << "acquire plan trans leader 
space: " << spaceId << " part: " << partId + << " from " << source.host << ":" << source.port << " to " << target.host << ":" + << target.port; + ++taskCount; + break; + } + } + + // if host has enough leader, just return + if (targetLeaders.size() == minLoad) { + LOG(INFO) << "Host: " << target << "'s leader reach " << minLoad; + break; + } + } + return taskCount; +} + +int32_t Balancer::giveupLeaders(HostParts& leaderParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& source, + LeaderBalancePlan& plan, + GraphSpaceID spaceId) { + int32_t taskCount = 0; + auto& sourceLeaders = leaderParts[source]; + size_t maxLoad = hostBounds_[source].second; + + // host will try to transfer the extra leaders to other peers + for (auto it = sourceLeaders.begin(); it != sourceLeaders.end();) { + // find the leader of partId + auto partId = *it; + const auto& targets = peersMap[partId]; + bool isErase = false; + + // leader should move to the peer with lowest loading + auto target = + std::min_element(targets.begin(), targets.end(), [&](const auto& l, const auto& r) -> bool { + if (source == l || !activeHosts.count(l)) { + return false; + } + return leaderParts[l].size() < leaderParts[r].size(); + }); + + // If peer can accept this partition leader, than host will transfer to the + // peer + if (target != targets.end()) { + auto& targetLeaders = leaderParts[*target]; + int32_t targetLeaderSize = targetLeaders.size(); + if (targetLeaderSize < hostBounds_[*target].second) { + it = sourceLeaders.erase(it); + targetLeaders.emplace_back(partId); + plan.emplace_back(spaceId, partId, source, *target); + LOG(INFO) << "giveup plan trans leader space: " << spaceId << " part: " << partId + << " from " << source.host << ":" << source.port << " to " << target->host << ":" + << target->port; + ++taskCount; + isErase = true; + } + } + + // if host has enough leader, just return + if (sourceLeaders.size() == maxLoad) { + LOG(INFO) << "Host: " << source << "'s 
leader reach " << maxLoad; + break; + } + + if (!isErase) { + ++it; + } + } + return taskCount; +} + +void Balancer::simplifyLeaderBalancePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan) { + // Within a leader balance plan, a partition may be moved several times, but + // actually we only need to transfer the leadership of a partition from the + // first host to the last host, and ignore the intermediate ones + std::unordered_map buckets; + for (auto& task : plan) { + buckets[std::get<1>(task)].emplace_back(task); + } + plan.clear(); + for (const auto& partEntry : buckets) { + plan.emplace_back(spaceId, + partEntry.first, + std::get<2>(partEntry.second.front()), + std::get<3>(partEntry.second.back())); + } +} + +nebula::cpp2::ErrorCode Balancer::collectZoneParts(const std::string& groupName, + HostParts& hostParts) { + auto groupKey = MetaKeyUtils::groupKey(groupName); + std::string groupValue; + auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get group " << groupName + << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + // zoneHosts use to record this host belong to zone's hosts + std::unordered_map, std::vector> zoneHosts; + auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); + for (auto zoneName : zoneNames) { + auto zoneKey = MetaKeyUtils::zoneKey(zoneName); + std::string zoneValue; + retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); + if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { + LOG(ERROR) << "Get zone " << zoneName + << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); + return retCode; + } + + auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); + for (const auto& host : hosts) { + auto pair = std::pair(std::move(host), zoneName); + auto& hs = zoneHosts[std::move(pair)]; + hs.insert(hs.end(), hosts.begin(), hosts.end()); + } + } 
+ + for (auto it = hostParts.begin(); it != hostParts.end(); it++) { + auto host = it->first; + auto zoneIter = + std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { + return host == pair.first.first; + }); + + if (zoneIter == zoneHosts.end()) { + LOG(INFO) << it->first << " have lost"; + continue; + } + + auto& hosts = zoneIter->second; + auto name = zoneIter->first.second; + for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { + auto partIter = hostParts.find(*hostIter); + if (partIter == hostParts.end()) { + zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); + } else { + zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); + } + } + } + return nebula::cpp2::ErrorCode::SUCCEEDED; +} + +bool Balancer::checkZoneLegal(const HostAddr& source, const HostAddr& target) { + VLOG(3) << "Check " << source << " : " << target; + auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { + return source == pair.first; + }); + + if (sourceIter == zoneParts_.end()) { + LOG(INFO) << "Source " << source << " not found"; + return false; + } + + auto targetIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&target](const auto& pair) { + return target == pair.first; + }); + + if (targetIter == zoneParts_.end()) { + LOG(INFO) << "Target " << target << " not found"; + return false; + } + + LOG(INFO) << sourceIter->second.first << " : " << targetIter->second.first; + return sourceIter->second.first == targetIter->second.first; +} + +} // namespace meta +} // namespace nebula diff --git a/src/meta/processors/admin/Balancer.h b/src/meta/processors/admin/Balancer.h new file mode 100644 index 00000000000..4a5331ee2a4 --- /dev/null +++ b/src/meta/processors/admin/Balancer.h @@ -0,0 +1,269 @@ +/* Copyright (c) 2019 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef META_ADMIN_BALANCER_H_ +#define META_ADMIN_BALANCER_H_ + +#include +#include + +#include "common/network/NetworkUtils.h" +#include "common/time/WallClock.h" +#include "kvstore/KVStore.h" +#include "meta/processors/admin/AdminClient.h" +#include "meta/processors/admin/BalancePlan.h" +#include "meta/processors/admin/BalanceTask.h" + +namespace nebula { +namespace meta { + +using HostParts = std::unordered_map>; +using PartAllocation = std::unordered_map>; +using LeaderBalancePlan = std::vector>; +using ZoneNameAndParts = std::pair>; + +/** +There are two interfaces public: + * Balance: it will construct a balance plan and invoke it. If the last balance +plan did not succeed, it will + * try to resume it. + * + * Rollback: In many cases, if some plan failed forever, we call this interface +to rollback. + +Some notes: +1. Balance will generate balance plan according to current active hosts and +parts allocation +2. For the plan, we hope after moving the least parts, it will reach a +reasonable state. +3. Only one balance plan could be invoked at the same time. +4. Each balance plan has one id, and we could show the status by "balance id" +command and after FO, we could resume the balance plan by typing "balance" again. +5. Each balance plan contains many balance tasks, the task represents the +minimum movement unit. +6. We save the whole balancePlan state in kvstore to do failover. +7. Each balance task contains several steps. And it should be executed step by +step. +8. One failed task will result in the failure of the whole balance plan. +9. 
Currently, we hope tasks for the same part could be invoked serially + * */ +class Balancer { + FRIEND_TEST(BalanceTest, BalancePartsTest); + FRIEND_TEST(BalanceTest, NormalTest); + FRIEND_TEST(BalanceTest, SimpleTestWithZone); + FRIEND_TEST(BalanceTest, SpecifyHostTest); + FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); + FRIEND_TEST(BalanceTest, MockReplaceMachineTest); + FRIEND_TEST(BalanceTest, SingleReplicaTest); + FRIEND_TEST(BalanceTest, TryToRecoveryTest); + FRIEND_TEST(BalanceTest, RecoveryTest); + FRIEND_TEST(BalanceTest, StopPlanTest); + FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceTest); + FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); + FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); + FRIEND_TEST(BalanceTest, ExpansionZoneTest); + FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); + FRIEND_TEST(BalanceTest, ShrinkZoneTest); + FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); + FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); + FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); + FRIEND_TEST(BalanceIntegrationTest, BalanceTest); + + public: + static Balancer* instance(kvstore::KVStore* kv) { + static std::unique_ptr client(new AdminClient(kv)); + static std::unique_ptr balancer(new Balancer(kv, client.get())); + return balancer.get(); + } + + ~Balancer() = default; + + /* + * Return Error if reject the balance request, otherwise return balance id. + * */ + ErrorOr balance(std::vector&& lostHosts = {}); + + /** + * Show balance plan id status. + * */ + ErrorOr show(BalanceID id) const; + + /** + * Stop balance plan by canceling all waiting balance task. 
+ * */ + ErrorOr stop(); + + /** + * Clean invalid plan, return the invalid plan key if any + * */ + ErrorOr cleanLastInValidPlan(); + + /** + * TODO(heng): rollback some balance plan. + */ + Status rollback(BalanceID id) { return Status::Error("unimplemented, %ld", id); } + + /** + * TODO(heng): Execute balance plan from outside. + * */ + Status execute(BalancePlan plan) { + UNUSED(plan); + return Status::Error("Unsupport it yet!"); + } + + /** + * TODO(heng): Execute specific balance plan by id. + * */ + Status execute(BalanceID id) { + UNUSED(id); + return Status::Error("Unsupport it yet!"); + } + + nebula::cpp2::ErrorCode leaderBalance(); + + void finish() { + CHECK(!lock_.try_lock()); + plan_.reset(); + running_ = false; + } + + bool isRunning() { + std::lock_guard lg(lock_); + return running_; + } + + private: + Balancer(kvstore::KVStore* kv, AdminClient* client) : kv_(kv), client_(client) { + executor_.reset(new folly::CPUThreadPoolExecutor(1)); + } + /* + * When the balancer failover, we should recovery the status. + * */ + nebula::cpp2::ErrorCode recovery(); + + /** + * Build balance plan and save it in kvstore. 
+ * */ + nebula::cpp2::ErrorCode buildBalancePlan(std::vector&& lostHosts); + + ErrorOr> genTasks( + GraphSpaceID spaceId, + int32_t spaceReplica, + bool dependentOnGroup, + std::vector&& lostHosts); + + ErrorOr>> fetchHostParts( + GraphSpaceID spaceId, + bool dependentOnGroup, + const HostParts& hostParts, + std::vector& lostHosts); + + ErrorOr getHostParts(GraphSpaceID spaceId, + bool dependentOnGroup, + HostParts& hostParts, + int32_t& totalParts); + + nebula::cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); + + void calDiff(const HostParts& hostParts, + const std::vector& activeHosts, + std::vector& newlyAdded, + std::vector& lost); + + Status checkReplica(const HostParts& hostParts, + const std::vector& activeHosts, + int32_t replica, + PartitionID partId); + + ErrorOr hostWithMinimalParts(const HostParts& hostParts, + PartitionID partId); + + ErrorOr hostWithMinimalPartsForZone(const HostAddr& source, + const HostParts& hostParts, + PartitionID partId); + + bool balanceParts(BalanceID balanceId, + GraphSpaceID spaceId, + HostParts& newHostParts, + int32_t totalParts, + std::vector& tasks, + bool dependentOnGroup); + + nebula::cpp2::ErrorCode transferLostHost(std::vector& tasks, + HostParts& newHostParts, + const HostAddr& source, + GraphSpaceID spaceId, + PartitionID partId, + bool dependentOnGroup); + + std::vector> sortedHostsByParts(const HostParts& hostParts); + + nebula::cpp2::ErrorCode getAllSpaces( + std::vector>& spaces); + + ErrorOr buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, + GraphSpaceID spaceId, + int32_t replicaFactor, + bool dependentOnGroup, + LeaderBalancePlan& plan, + bool useDeviation = true); + + void simplifyLeaderBalancePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); + + int32_t acquireLeaders(HostParts& allHostParts, + HostParts& leaderHostParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& target, + LeaderBalancePlan& plan, + GraphSpaceID spaceId); 
+ + int32_t giveupLeaders(HostParts& leaderHostParts, + PartAllocation& peersMap, + std::unordered_set& activeHosts, + const HostAddr& source, + LeaderBalancePlan& plan, + GraphSpaceID spaceId); + + nebula::cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); + + bool checkZoneLegal(const HostAddr& source, const HostAddr& target); + + private: + std::atomic_bool running_{false}; + kvstore::KVStore* kv_{nullptr}; + AdminClient* client_{nullptr}; + // Current running plan. + std::shared_ptr plan_{nullptr}; + std::unique_ptr executor_; + std::atomic_bool inLeaderBalance_{false}; + + // Host => Graph => Partitions + std::unique_ptr hostLeaderMap_; + mutable std::mutex lock_; + + std::unordered_map> hostBounds_; + + // TODO: (darion) nesting map maybe better + std::unordered_map zoneParts_; + std::unordered_map> zoneHosts_; + + // if the space dependent on group, it use to record the partition + // contained in the zone related to the node. + std::unordered_map> relatedParts_; + + bool innerBalance_ = false; +}; + +} // namespace meta +} // namespace nebula + +#endif // META_ADMIN_BALANCER_H_ diff --git a/src/meta/processors/admin/CreateBackupProcessor.cpp b/src/meta/processors/admin/CreateBackupProcessor.cpp index 4ad2e9eb542..e2454366a5e 100644 --- a/src/meta/processors/admin/CreateBackupProcessor.cpp +++ b/src/meta/processors/admin/CreateBackupProcessor.cpp @@ -84,7 +84,7 @@ void CreateBackupProcessor::process(const cpp2::CreateBackupReq& req) { } JobManager* jobMgr = JobManager::getInstance(); - auto result = jobMgr->checkIndexJobRuning(); + auto result = jobMgr->checkIndexJobRunning(); if (!nebula::ok(result)) { LOG(ERROR) << "get Index status failed, not allowed to create backup."; handleErrorCode(nebula::error(result)); diff --git a/src/meta/processors/admin/CreateSnapshotProcessor.cpp b/src/meta/processors/admin/CreateSnapshotProcessor.cpp index 0af266ebb59..6e57a88b781 100644 --- 
a/src/meta/processors/admin/CreateSnapshotProcessor.cpp +++ b/src/meta/processors/admin/CreateSnapshotProcessor.cpp @@ -16,7 +16,7 @@ void CreateSnapshotProcessor::process(const cpp2::CreateSnapshotReq&) { // check the index rebuild. not allowed to create snapshot when index // rebuilding. JobManager* jobMgr = JobManager::getInstance(); - auto result = jobMgr->checkIndexJobRuning(); + auto result = jobMgr->checkIndexJobRunning(); if (!nebula::ok(result)) { handleErrorCode(nebula::error(result)); onFinished(); diff --git a/src/meta/processors/admin/HBProcessor.cpp b/src/meta/processors/admin/HBProcessor.cpp index fd535e3ec51..6abdb2eff1b 100644 --- a/src/meta/processors/admin/HBProcessor.cpp +++ b/src/meta/processors/admin/HBProcessor.cpp @@ -33,11 +33,11 @@ void HBProcessor::process(const cpp2::HBReq& req) { LOG(INFO) << "Receive heartbeat from " << host << ", role = " << apache::thrift::util::enumNameSafe(req.get_role()); if (req.get_role() == cpp2::HostRole::STORAGE) { - ClusterID peerCluserId = req.get_cluster_id(); - if (peerCluserId == 0) { + ClusterID peerClusterId = req.get_cluster_id(); + if (peerClusterId == 0) { LOG(INFO) << "Set clusterId for new host " << host << "!"; resp_.set_cluster_id(clusterId_); - } else if (peerCluserId != clusterId_) { + } else if (peerClusterId != clusterId_) { LOG(ERROR) << "Reject wrong cluster host " << host << "!"; handleErrorCode(nebula::cpp2::ErrorCode::E_WRONGCLUSTER); onFinished(); diff --git a/src/meta/processors/admin/VerifyClientVersionProcessor.cpp b/src/meta/processors/admin/VerifyClientVersionProcessor.cpp index bca4b824429..314ab2e9ec8 100644 --- a/src/meta/processors/admin/VerifyClientVersionProcessor.cpp +++ b/src/meta/processors/admin/VerifyClientVersionProcessor.cpp @@ -10,7 +10,7 @@ DEFINE_bool(enable_client_white_list, true, "Turn on/off the client white list."); DEFINE_string(client_white_list, nebula::getOriginVersion() + ":2.5.0:2.5.1:2.6.0", - "A white list for different client versions, seperate with 
colon."); + "A white list for different client versions, separate with colon."); namespace nebula { namespace meta { diff --git a/src/meta/processors/job/GetStatsProcessor.cpp b/src/meta/processors/job/GetStatsProcessor.cpp index 62065fa0a38..094d018cc7d 100644 --- a/src/meta/processors/job/GetStatsProcessor.cpp +++ b/src/meta/processors/job/GetStatsProcessor.cpp @@ -30,8 +30,8 @@ void GetStatsProcessor::process(const cpp2::GetStatsReq& req) { return; } auto statsItem = MetaKeyUtils::parseStatsVal(val); - auto statisJobStatus = statsItem.get_status(); - if (statisJobStatus != cpp2::JobStatus::FINISHED) { + auto statsJobStatus = statsItem.get_status(); + if (statsJobStatus != cpp2::JobStatus::FINISHED) { LOG(ERROR) << "SpaceId " << spaceId << " stats job is running or failed, please execute `show jobs' firstly."; handleErrorCode(nebula::cpp2::ErrorCode::E_JOB_NOT_FINISHED); diff --git a/src/meta/processors/job/JobManager.cpp b/src/meta/processors/job/JobManager.cpp index 8d53afd4430..2d15e262d3b 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -122,7 +122,7 @@ void JobManager::scheduleThread() { auto jobDescRet = JobDescription::loadJobDescription(opJobId.second, kvStore_); if (!nebula::ok(jobDescRet)) { LOG(ERROR) << "[JobManager] load an invalid job from queue " << opJobId.second; - continue; // leader change or archive happend + continue; // leader change or archive happened } auto jobDesc = nebula::value(jobDescRet); if (!jobDesc.setStatus(cpp2::JobStatus::RUNNING)) { @@ -334,8 +334,8 @@ nebula::cpp2::ErrorCode JobManager::reportTaskFinish(const cpp2::ReportTaskReq& "report to an in-active job manager, job={}, task={}", jobId, taskId); return nebula::cpp2::ErrorCode::E_UNKNOWN; } - // bacause the last task will update the job's status - // tasks shoule report once a time + // because the last task will update the job's status + // tasks should report once a time std::lock_guard lk(muReportFinish_); auto tasksRet 
= getAllTasks(jobId); if (!nebula::ok(tasksRet)) { @@ -691,7 +691,7 @@ ErrorOr JobManager::getSpaceId(const std: return *reinterpret_cast(val.c_str()); } -ErrorOr JobManager::checkIndexJobRuning() { +ErrorOr JobManager::checkIndexJobRunning() { std::unique_ptr iter; auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { diff --git a/src/meta/processors/job/JobManager.h b/src/meta/processors/job/JobManager.h index ab9b2280269..8d9a2e8d188 100644 --- a/src/meta/processors/job/JobManager.h +++ b/src/meta/processors/job/JobManager.h @@ -102,14 +102,14 @@ class JobManager : public nebula::cpp::NonCopyable, public nebula::cpp::NonMovab size_t jobSize() const; // Tries to extract an element from the front of the highPriorityQueue_, - // if faild, then extract an element from lowPriorityQueue_. + // if failed, then extract an element from lowPriorityQueue_. // If the element is obtained, return true, otherwise return false. 
bool try_dequeue(std::pair& opJobId); // Enter different priority queues according to the command type void enqueue(const JbOp& op, const JobID& jobId, const cpp2::AdminCmd& cmd); - ErrorOr checkIndexJobRuning(); + ErrorOr checkIndexJobRunning(); nebula::cpp2::ErrorCode handleRemainingJobs(); diff --git a/src/meta/processors/job/MetaJobExecutor.cpp b/src/meta/processors/job/MetaJobExecutor.cpp index f41e8d4eb2a..4a1c648ed6a 100644 --- a/src/meta/processors/job/MetaJobExecutor.cpp +++ b/src/meta/processors/job/MetaJobExecutor.cpp @@ -84,7 +84,7 @@ ErrOrHosts MetaJobExecutor::getTargetHost(GraphSpaceID spaceId) { return retCode; } - // use vector instead of set because this can convient for next step + // use vector instead of set because this can convenient for next step std::unordered_map> hostAndPart; std::vector>> hosts; while (iter->valid()) { @@ -224,15 +224,15 @@ nebula::cpp2::ErrorCode MetaJobExecutor::execute() { } } - std::vector> futs; + std::vector> futures; for (auto& address : addresses) { // transform to the admin host auto h = Utils::getAdminAddrFromStoreAddr(address.first); - futs.emplace_back(executeInternal(std::move(h), std::move(address.second))); + futures.emplace_back(executeInternal(std::move(h), std::move(address.second))); } auto rc = nebula::cpp2::ErrorCode::SUCCEEDED; - auto tries = folly::collectAll(std::move(futs)).get(); + auto tries = folly::collectAll(std::move(futures)).get(); for (auto& t : tries) { if (t.hasException()) { LOG(ERROR) << t.exception().what(); diff --git a/src/meta/processors/job/StatsJobExecutor.cpp b/src/meta/processors/job/StatsJobExecutor.cpp index 26d50c7d1e9..ceab9a7b085 100644 --- a/src/meta/processors/job/StatsJobExecutor.cpp +++ b/src/meta/processors/job/StatsJobExecutor.cpp @@ -50,7 +50,7 @@ nebula::cpp2::ErrorCode StatsJobExecutor::prepare() { } space_ = nebula::value(spaceRet); - // Set the status of the statis job to running + // Set the status of the stats job to running cpp2::StatsItem statsItem; 
statsItem.set_status(cpp2::JobStatus::RUNNING); auto statsKey = MetaKeyUtils::statsKey(space_); @@ -158,7 +158,7 @@ nebula::cpp2::ErrorCode StatsJobExecutor::finish(bool exeSuccessed) { std::string val; auto ret = kvstore_->get(kDefaultSpaceId, kDefaultPartId, tempKey, &val); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't find the statis data, spaceId : " << space_; + LOG(ERROR) << "Can't find the stats data, spaceId : " << space_; return ret; } auto statsItem = MetaKeyUtils::parseStatsVal(val); @@ -170,7 +170,7 @@ nebula::cpp2::ErrorCode StatsJobExecutor::finish(bool exeSuccessed) { auto statsVal = MetaKeyUtils::statsVal(statsItem); auto retCode = save(statsKey, statsVal); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Sace statis data failed, error " << apache::thrift::util::enumNameSafe(retCode); + LOG(ERROR) << "Save stats data failed, error " << apache::thrift::util::enumNameSafe(retCode); return retCode; } return doRemove(tempKey); @@ -196,13 +196,13 @@ nebula::cpp2::ErrorCode StatsJobExecutor::stop() { auto tries = folly::collectAll(std::move(futures)).get(); if (std::any_of(tries.begin(), tries.end(), [](auto& t) { return t.hasException(); })) { - LOG(ERROR) << "statis job stop() RPC failure."; + LOG(ERROR) << "stats job stop() RPC failure."; return nebula::cpp2::ErrorCode::E_BALANCER_FAILURE; } for (const auto& t : tries) { if (!t.value().ok()) { - LOG(ERROR) << "Stop statis job Failed"; + LOG(ERROR) << "Stop stats job Failed"; return nebula::cpp2::ErrorCode::E_BALANCER_FAILURE; } } diff --git a/src/meta/processors/job/StatsJobExecutor.h b/src/meta/processors/job/StatsJobExecutor.h index 70bd028058b..a7e3e23ab2f 100644 --- a/src/meta/processors/job/StatsJobExecutor.h +++ b/src/meta/processors/job/StatsJobExecutor.h @@ -39,7 +39,7 @@ class StatsJobExecutor : public MetaJobExecutor { private: // Stats job writes an additional data.
- // The additional data is written when the statis job passes the check + // The additional data is written when the stats job passes the check // function. Update this additional data when job finishes. nebula::cpp2::ErrorCode save(const std::string& key, const std::string& val); diff --git a/src/meta/processors/kv/RemoveProcessor.h b/src/meta/processors/kv/RemoveProcessor.h index d789c5b338e..652f79c0dbb 100644 --- a/src/meta/processors/kv/RemoveProcessor.h +++ b/src/meta/processors/kv/RemoveProcessor.h @@ -12,7 +12,7 @@ namespace nebula { namespace meta { /** - * Remove some rows in custorm kv operations. + * Remove some rows in custom kv operations. * */ class RemoveProcessor : public BaseProcessor { public: diff --git a/src/meta/processors/parts/DropSpaceProcessor.cpp b/src/meta/processors/parts/DropSpaceProcessor.cpp index 4b47e00fd94..b36f3b0c4b4 100644 --- a/src/meta/processors/parts/DropSpaceProcessor.cpp +++ b/src/meta/processors/parts/DropSpaceProcessor.cpp @@ -94,11 +94,11 @@ void DropSpaceProcessor::process(const cpp2::DropSpaceReq& req) { lstIter->next(); } - // 5. Delete related statis data - auto statiskey = MetaKeyUtils::statsKey(spaceId); - deleteKeys.emplace_back(statiskey); + // 5. Delete related stats data + auto statskey = MetaKeyUtils::statsKey(spaceId); + deleteKeys.emplace_back(statskey); - // 6. Delte related fulltext index meta data + // 6. 
Delete related fulltext index meta data auto ftPrefix = MetaKeyUtils::fulltextIndexPrefix(); auto ftRet = doPrefix(ftPrefix); if (!nebula::ok(ftRet)) { diff --git a/src/meta/processors/parts/ListHostsProcessor.cpp b/src/meta/processors/parts/ListHostsProcessor.cpp index 49d174fb65a..ef1e115f249 100644 --- a/src/meta/processors/parts/ListHostsProcessor.cpp +++ b/src/meta/processors/parts/ListHostsProcessor.cpp @@ -68,7 +68,7 @@ void ListHostsProcessor::process(const cpp2::ListHostsReq& req) { * now(2020-04-29), assume all metad have same gitInfoSHA * this will change if some day * meta.thrift support interface like getHostStatus() - * which return a bunch of host infomation + * which return a bunch of host information * it's not necessary add this interface only for gitInfoSHA * */ nebula::cpp2::ErrorCode ListHostsProcessor::allMetaHostsStatus() { @@ -79,7 +79,7 @@ nebula::cpp2::ErrorCode ListHostsProcessor::allMetaHostsStatus() { return retCode; } auto metaPeers = nebula::value(errOrPart)->peers(); - // transform raft port to servre port + // transform raft port to server port for (auto& metaHost : metaPeers) { metaHost = Utils::getStoreAddrFromRaftAddr(metaHost); } diff --git a/src/meta/processors/schema/GetEdgeProcessor.cpp b/src/meta/processors/schema/GetEdgeProcessor.cpp index 9550d6f68b4..da8c371ab3f 100644 --- a/src/meta/processors/schema/GetEdgeProcessor.cpp +++ b/src/meta/processors/schema/GetEdgeProcessor.cpp @@ -25,7 +25,7 @@ void GetEdgeProcessor::process(const cpp2::GetEdgeReq& req) { auto edgeType = nebula::value(edgeTypeRet); std::string schemaValue; - // Get the lastest version + // Get the latest version if (ver < 0) { auto edgePrefix = MetaKeyUtils::schemaEdgePrefix(spaceId, edgeType); auto ret = doPrefix(edgePrefix); diff --git a/src/meta/processors/schema/GetTagProcessor.cpp b/src/meta/processors/schema/GetTagProcessor.cpp index 2d1ed2fd35f..16af1b78f2e 100644 --- a/src/meta/processors/schema/GetTagProcessor.cpp +++
b/src/meta/processors/schema/GetTagProcessor.cpp @@ -25,7 +25,7 @@ void GetTagProcessor::process(const cpp2::GetTagReq& req) { auto tagId = nebula::value(tagIdRet); std::string schemaValue; - // Get the lastest version + // Get the latest version if (ver < 0) { auto tagPrefix = MetaKeyUtils::schemaTagPrefix(spaceId, tagId); auto ret = doPrefix(tagPrefix); diff --git a/src/meta/processors/zone/DropGroupProcessor.cpp b/src/meta/processors/zone/DropGroupProcessor.cpp index 287ed659ba9..50f40385fa4 100644 --- a/src/meta/processors/zone/DropGroupProcessor.cpp +++ b/src/meta/processors/zone/DropGroupProcessor.cpp @@ -24,7 +24,7 @@ void DropGroupProcessor::process(const cpp2::DropGroupReq& req) { return; } - // If any space rely on this group, it should not be droped. + // If any space rely on this group, it should not be dropped. auto retCode = checkSpaceDependency(groupName); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { handleErrorCode(retCode); diff --git a/src/meta/processors/zone/DropZoneProcessor.cpp b/src/meta/processors/zone/DropZoneProcessor.cpp index 0fc4691c6a9..a6377879b93 100644 --- a/src/meta/processors/zone/DropZoneProcessor.cpp +++ b/src/meta/processors/zone/DropZoneProcessor.cpp @@ -24,7 +24,7 @@ void DropZoneProcessor::process(const cpp2::DropZoneReq& req) { return; } - // If zone belong to any group, it should not be droped. + // If zone belong to any group, it should not be dropped. 
auto retCode = checkGroupDependency(zoneName); if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { handleErrorCode(retCode); diff --git a/src/meta/test/BalanceIntegrationTest.cpp b/src/meta/test/BalanceIntegrationTest.cpp index eb7c4c8a6a4..519862ef480 100644 --- a/src/meta/test/BalanceIntegrationTest.cpp +++ b/src/meta/test/BalanceIntegrationTest.cpp @@ -175,7 +175,7 @@ TEST(BalanceIntegrationTest, BalanceTest) { newMetaClient.get(), dataPath.c_str(), localIp, storagePort, true); LOG(INFO) << "Start a new storage server on " << storageAddr; } - LOG(INFO) << "Let's stop the last storage servcie " << storagePorts.back(); + LOG(INFO) << "Let's stop the last storage service " << storagePorts.back(); { metaClients.back()->stop(); serverContexts.back().reset(); @@ -203,7 +203,7 @@ TEST(BalanceIntegrationTest, BalanceTest) { int num = 0; std::string lastKey = ""; while (iter->valid()) { - // filter the multipule versions for data. + // filter the multiple versions for data. auto key = NebulaKeyUtils::keyWithNoVersion(iter->key()); if (lastKey == key) { iter->next(); diff --git a/src/meta/test/BalancerTest.cpp b/src/meta/test/BalancerTest.cpp index b18cf0016a7..2151d3f1341 100644 --- a/src/meta/test/BalancerTest.cpp +++ b/src/meta/test/BalancerTest.cpp @@ -1093,7 +1093,7 @@ TEST(BalanceTest, SpecifyMultiHostTest) { LOG(INFO) << "Rebalance finished!"; // In theory, there should be only 12 tasks, but in some environment, 13 tasks - // is generated. A parition is moved more than once from A -> B -> C, actually + // is generated. A partition is moved more than once from A -> B -> C, actually // A -> C is enough. 
verifyBalanceTask( kv, balancer.jobId_, BalanceTaskStatus::END, BalanceTaskResult::SUCCEEDED, partCount); @@ -1583,7 +1583,7 @@ TEST(BalanceTest, ManyHostsLeaderBalancePlanTest) { LeaderBalanceJobExecutor balancer( testJobId.fetch_add(1, std::memory_order_relaxed), kv, &client, {}); - // chcek several times if they are balanced + // check several times if they are balanced for (int count = 0; count < 1; count++) { HostLeaderMap hostLeaderMap; // all part will random choose a leader diff --git a/src/meta/test/ConfigManTest.cpp b/src/meta/test/ConfigManTest.cpp index 1e48d01ec5d..e802317f89c 100644 --- a/src/meta/test/ConfigManTest.cpp +++ b/src/meta/test/ConfigManTest.cpp @@ -188,7 +188,7 @@ TEST(ConfigManTest, ConfigProcessorTest) { updated.set_name("nested"); updated.set_type(cpp2::ConfigType::NESTED); updated.set_mode(cpp2::ConfigMode::MUTABLE); - // update from consle as format of update list + // update from console as format of update list updated.set_value("max_background_jobs=8,level0_file_num_compaction_trigger=10"); cpp2::SetConfigReq req; diff --git a/src/meta/test/GetStatsTest.cpp b/src/meta/test/GetStatsTest.cpp index e4631e8c442..bc10498acb8 100644 --- a/src/meta/test/GetStatsTest.cpp +++ b/src/meta/test/GetStatsTest.cpp @@ -122,19 +122,19 @@ TEST_F(GetStatsTest, StatsJob) { TestUtils::assembleSpace(kv_.get(), 1, 1); GraphSpaceID spaceId = 1; std::vector paras{"test_space"}; - JobDescription statisJob(12, cpp2::AdminCmd::STATS, paras); + JobDescription statsJob(12, cpp2::AdminCmd::STATS, paras); NiceMock adminClient; jobMgr->adminClient_ = &adminClient; - auto rc = jobMgr->save(statisJob.jobKey(), statisJob.jobVal()); + auto rc = jobMgr->save(statsJob.jobKey(), statsJob.jobVal()); ASSERT_EQ(rc, nebula::cpp2::ErrorCode::SUCCEEDED); { // Job is not executed, job status is QUEUE. // Stats data does not exist. 
- auto job1Ret = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); + auto job1Ret = JobDescription::loadJobDescription(statsJob.id_, kv_.get()); ASSERT_TRUE(nebula::ok(job1Ret)); auto job1 = nebula::value(job1Ret); - ASSERT_EQ(statisJob.id_, job1.id_); + ASSERT_EQ(statsJob.id_, job1.id_); ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.status_); cpp2::GetStatsReq req; @@ -145,7 +145,7 @@ TEST_F(GetStatsTest, StatsJob) { auto resp = std::move(f).get(); ASSERT_NE(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - // Directly find statis data in kvstore, statis data does not exist. + // Directly find stats data in kvstore, stats data does not exist. auto key = MetaKeyUtils::statsKey(spaceId); std::string val; auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); @@ -157,27 +157,27 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(retsav, nebula::cpp2::ErrorCode::SUCCEEDED); } - // Run statis job, job finished. - // Insert running status statis data in prepare function of runJobInternal. - // Update statis data to finished or failed status in finish function of + // Run stats job, job finished. + // Insert running status stats data in prepare function of runJobInternal. + // Update stats data to finished or failed status in finish function of // runJobInternal. - auto result = jobMgr->runJobInternal(statisJob, JobManager::JbOp::ADD); + auto result = jobMgr->runJobInternal(statsJob, JobManager::JbOp::ADD); ASSERT_TRUE(result); // JobManager does not set the job finished status in RunJobInternal function. - // But set statis data. - statisJob.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->save(statisJob.jobKey(), statisJob.jobVal()); - auto jobId = statisJob.getJobId(); + // But set stats data. 
+ statsJob.setStatus(cpp2::JobStatus::FINISHED); + jobMgr->save(statsJob.jobKey(), statsJob.jobVal()); + auto jobId = statsJob.getJobId(); auto statsKey = MetaKeyUtils::statsKey(spaceId); auto tempKey = toTempKey(spaceId, jobId); copyData(kv_.get(), 0, 0, statsKey, tempKey); jobMgr->jobFinished(jobId, cpp2::JobStatus::FINISHED); { - auto job2Ret = JobDescription::loadJobDescription(statisJob.id_, kv_.get()); + auto job2Ret = JobDescription::loadJobDescription(statsJob.id_, kv_.get()); ASSERT_TRUE(nebula::ok(job2Ret)); auto job2 = nebula::value(job2Ret); - ASSERT_EQ(statisJob.id_, job2.id_); + ASSERT_EQ(statsJob.id_, job2.id_); ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.status_); cpp2::GetStatsReq req; @@ -198,7 +198,7 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(0, statsItem.get_space_vertices()); ASSERT_EQ(0, statsItem.get_space_edges()); - // Directly find statis data in kvstore, statis data exists. + // Directly find stats data in kvstore, stats data exists. auto key = MetaKeyUtils::statsKey(spaceId); std::string val; auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); @@ -212,22 +212,22 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(0, statsItem1.get_space_edges()); } - // Execute new statis job in same space. + // Execute new stats job in same space. std::vector paras1{"test_space"}; - JobDescription statisJob2(13, cpp2::AdminCmd::STATS, paras1); - auto rc2 = jobMgr->save(statisJob2.jobKey(), statisJob2.jobVal()); + JobDescription statsJob2(13, cpp2::AdminCmd::STATS, paras1); + auto rc2 = jobMgr->save(statsJob2.jobKey(), statsJob2.jobVal()); ASSERT_EQ(rc2, nebula::cpp2::ErrorCode::SUCCEEDED); { // Job is not executed, job status is QUEUE. // Stats data exists, but it is the result of the last stats job // execution. 
- auto job1Ret = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); + auto job1Ret = JobDescription::loadJobDescription(statsJob2.id_, kv_.get()); ASSERT_TRUE(nebula::ok(job1Ret)); auto job1 = nebula::value(job1Ret); - ASSERT_EQ(statisJob2.id_, job1.id_); + ASSERT_EQ(statsJob2.id_, job1.id_); ASSERT_EQ(cpp2::JobStatus::QUEUE, job1.status_); - // Success, but statis data is the result of the last statis job. + // Success, but stats data is the result of the last stats job. cpp2::GetStatsReq req; req.set_space_id(spaceId); auto* processor = GetStatsProcessor::instance(kv_.get()); @@ -243,7 +243,7 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(0, statsItem.get_space_vertices()); ASSERT_EQ(0, statsItem.get_space_edges()); - // Directly find statis data in kvstore, statis data exists. + // Directly find stats data in kvstore, stats data exists. auto key = MetaKeyUtils::statsKey(spaceId); std::string val; auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); @@ -262,7 +262,7 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(retsav, nebula::cpp2::ErrorCode::SUCCEEDED); } - // Remove statis data. + // Remove stats data. { auto key = MetaKeyUtils::statsKey(spaceId); folly::Baton baton; @@ -277,7 +277,7 @@ TEST_F(GetStatsTest, StatsJob) { baton.wait(); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, retCode); - // Directly find statis data in kvstore, statis data does not exist. + // Directly find stats data in kvstore, stats data does not exist. std::string val; auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); ASSERT_NE(nebula::cpp2::ErrorCode::SUCCEEDED, ret); @@ -291,13 +291,13 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_NE(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } - // Run statis job. - // Insert running status statis data in prepare function of runJobInternal. - // Update statis data to finished or failed status in finish function of + // Run stats job. 
+ // Insert running status stats data in prepare function of runJobInternal. + // Update stats data to finished or failed status in finish function of // runJobInternal. - auto result2 = jobMgr->runJobInternal(statisJob2, JobManager::JbOp::ADD); + auto result2 = jobMgr->runJobInternal(statsJob2, JobManager::JbOp::ADD); - auto jobId2 = statisJob2.getJobId(); + auto jobId2 = statsJob2.getJobId(); auto statsKey2 = MetaKeyUtils::statsKey(spaceId); auto tempKey2 = toTempKey(spaceId, jobId2); @@ -306,15 +306,15 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_TRUE(result2); // JobManager does not set the job finished status in RunJobInternal function. - // But set statis data. - statisJob2.setStatus(cpp2::JobStatus::FINISHED); - jobMgr->save(statisJob2.jobKey(), statisJob2.jobVal()); + // But set stats data. + statsJob2.setStatus(cpp2::JobStatus::FINISHED); + jobMgr->save(statsJob2.jobKey(), statsJob2.jobVal()); { - auto job2Ret = JobDescription::loadJobDescription(statisJob2.id_, kv_.get()); + auto job2Ret = JobDescription::loadJobDescription(statsJob2.id_, kv_.get()); ASSERT_TRUE(nebula::ok(job2Ret)); auto job2 = nebula::value(job2Ret); - ASSERT_EQ(statisJob2.id_, job2.id_); + ASSERT_EQ(statsJob2.id_, job2.id_); ASSERT_EQ(cpp2::JobStatus::FINISHED, job2.status_); cpp2::GetStatsReq req; @@ -332,7 +332,7 @@ TEST_F(GetStatsTest, StatsJob) { ASSERT_EQ(0, statsItem.get_space_vertices()); ASSERT_EQ(0, statsItem.get_space_edges()); - // Directly find statis data in kvstore, statis data exists. + // Directly find stats data in kvstore, stats data exists. 
auto key = MetaKeyUtils::statsKey(spaceId); std::string val; auto ret = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &val); @@ -368,7 +368,7 @@ TEST_F(GetStatsTest, MockSingleMachineTest) { NiceMock adminClient; jobMgr->adminClient_ = &adminClient; - // add statis job1 + // add stats job1 JobID jobId1 = 1; std::vector paras{"test_space"}; JobDescription job1(jobId1, cpp2::AdminCmd::STATS, paras); @@ -417,7 +417,7 @@ TEST_F(GetStatsTest, MockSingleMachineTest) { ASSERT_EQ(200, statsItem.get_space_edges()); } - // add statis job2 of same space + // add stats job2 of same space JobID jobId2 = 2; JobDescription job2(jobId2, cpp2::AdminCmd::STATS, paras); jobMgr->addJob(job2, &adminClient); @@ -483,7 +483,7 @@ TEST_F(GetStatsTest, MockMultiMachineTest) { NiceMock adminClient; jobMgr->adminClient_ = &adminClient; - // add statis job + // add stats job JobID jobId = 1; std::vector paras{"test_space"}; JobDescription job(jobId, cpp2::AdminCmd::STATS, paras); diff --git a/src/meta/test/JobManagerTest.cpp b/src/meta/test/JobManagerTest.cpp index 855b8563cdc..9c910486c5c 100644 --- a/src/meta/test/JobManagerTest.cpp +++ b/src/meta/test/JobManagerTest.cpp @@ -82,7 +82,7 @@ TEST_F(JobManagerTest, addJob) { } TEST_F(JobManagerTest, AddRebuildTagIndexJob) { - // For preventting job schedule in JobManager + // For preventing job schedule in JobManager jobMgr->status_ = JobManager::JbmgrStatus::STOPPED; std::vector paras{"tag_index_name", "test_space"}; @@ -94,7 +94,7 @@ TEST_F(JobManagerTest, AddRebuildTagIndexJob) { } TEST_F(JobManagerTest, AddRebuildEdgeIndexJob) { - // For preventting job schedule in JobManager + // For preventing job schedule in JobManager jobMgr->status_ = JobManager::JbmgrStatus::STOPPED; std::vector paras{"edge_index_name", "test_space"}; @@ -106,7 +106,7 @@ TEST_F(JobManagerTest, AddRebuildEdgeIndexJob) { } TEST_F(JobManagerTest, StatsJob) { - // For preventting job schedule in JobManager + // For preventing job schedule in JobManager jobMgr->status_ = 
JobManager::JbmgrStatus::STOPPED; std::vector paras{"test_space"}; @@ -127,7 +127,7 @@ TEST_F(JobManagerTest, StatsJob) { } TEST_F(JobManagerTest, JobPriority) { - // For preventting job schedule in JobManager + // For preventing job schedule in JobManager jobMgr->status_ = JobManager::JbmgrStatus::STOPPED; ASSERT_EQ(0, jobMgr->jobSize()); @@ -162,7 +162,7 @@ TEST_F(JobManagerTest, JobPriority) { } TEST_F(JobManagerTest, JobDeduplication) { - // For preventting job schedule in JobManager + // For preventing job schedule in JobManager jobMgr->status_ = JobManager::JbmgrStatus::STOPPED; ASSERT_EQ(0, jobMgr->jobSize()); diff --git a/src/meta/test/MetaClientTest.cpp b/src/meta/test/MetaClientTest.cpp index 2b6d791838b..a9e6d6f4723 100644 --- a/src/meta/test/MetaClientTest.cpp +++ b/src/meta/test/MetaClientTest.cpp @@ -2019,7 +2019,7 @@ TEST(MetaClientTest, Config) { configs = std::move(resp).value(); EXPECT_EQ(configs[0].get_value(), Value(3)); } - // Just avoid memory leak error of clang asan. to waitting asynchronous thread + // Just avoid memory leak error of clang asan. to waiting asynchronous thread // done. 
sleep(FLAGS_heartbeat_interval_secs * 5); } diff --git a/src/meta/test/ProcessorTest.cpp b/src/meta/test/ProcessorTest.cpp index 4e68688facf..49e252d53d4 100644 --- a/src/meta/test/ProcessorTest.cpp +++ b/src/meta/test/ProcessorTest.cpp @@ -897,7 +897,7 @@ TEST(ProcessorTest, CreateTagTest) { cpp2::CreateTagReq req; req.set_space_id(1); - req.set_tag_name("tag_type_mismatche"); + req.set_tag_name("tag_type_mismatch"); req.set_schema(std::move(schemaWithDefault)); auto* processor = CreateTagProcessor::instance(kv.get()); auto f = processor->getFuture(); @@ -921,7 +921,7 @@ TEST(ProcessorTest, CreateTagTest) { cpp2::CreateTagReq req; req.set_space_id(1); - req.set_tag_name("tag_value_mismatche"); + req.set_tag_name("tag_value_mismatch"); req.set_schema(std::move(schemaWithDefault)); auto* processor = CreateTagProcessor::instance(kv.get()); auto f = processor->getFuture(); @@ -1088,7 +1088,7 @@ TEST(ProcessorTest, CreateEdgeTest) { cpp2::CreateEdgeReq req; req.set_space_id(1); - req.set_edge_name("edge_with_defaule"); + req.set_edge_name("edge_with_default"); req.set_schema(std::move(schemaWithDefault)); auto* processor = CreateEdgeProcessor::instance(kv.get()); auto f = processor->getFuture(); @@ -1105,7 +1105,7 @@ TEST(ProcessorTest, CreateEdgeTest) { cpp2::CreateEdgeReq req; req.set_space_id(1); - req.set_edge_name("edge_type_mismatche"); + req.set_edge_name("edge_type_mismatch"); req.set_schema(std::move(schemaWithDefault)); auto* processor = CreateEdgeProcessor::instance(kv.get()); auto f = processor->getFuture(); diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h index c774e724d1f..f0f0af9500f 100644 --- a/src/parser/AdminSentences.h +++ b/src/parser/AdminSentences.h @@ -215,7 +215,7 @@ class SpaceOptItem final { if (isString()) { return asString(); } else { - LOG(ERROR) << "collate value illage."; + LOG(ERROR) << "collate value illegal."; return ""; } } @@ -224,7 +224,7 @@ class SpaceOptItem final { if (isString()) { return asString(); } 
else { - LOG(ERROR) << "group name value illage."; + LOG(ERROR) << "group name value illegal."; return ""; } } @@ -591,10 +591,10 @@ class ShowSessionsSentence final : public Sentence { explicit ShowSessionsSentence(SessionID sessionId) { kind_ = Kind::kShowSessions; sessionId_ = sessionId; - setSeesionId_ = true; + setSessionId_ = true; } - bool isSetSessionID() const { return setSeesionId_; } + bool isSetSessionID() const { return setSessionId_; } SessionID getSessionID() const { return sessionId_; } @@ -602,7 +602,7 @@ class ShowSessionsSentence final : public Sentence { private: SessionID sessionId_{0}; - bool setSeesionId_{false}; + bool setSessionId_{false}; }; class ShowQueriesSentence final : public Sentence { diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 9322db81431..7508eb588e8 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -387,7 +387,7 @@ static constexpr size_t kCommentLengthLimit = 256; %type opt_if_not_exists %type opt_if_exists -%type opt_with_properites +%type opt_with_properties %left QM COLON %left KW_OR KW_XOR @@ -867,7 +867,7 @@ predicate_expression | KW_EXISTS L_PAREN expression R_PAREN { if ($3->kind() != Expression::Kind::kLabelAttribute && $3->kind() != Expression::Kind::kAttribute && $3->kind() != Expression::Kind::kSubscript) { - throw nebula::GraphParser::syntax_error(@3, "The exists only accept LabelAttribe, Attribute and Subscript"); + throw nebula::GraphParser::syntax_error(@3, "The exists only accept LabelAttribute, Attribute and Subscript"); } $$ = PredicateExpression::make(qctx->objPool(), "exists", "", $3, nullptr); } @@ -1509,7 +1509,7 @@ match_clause delete($3); throw nebula::GraphParser::syntax_error(@3, "Invalid use of aggregating function in this context."); } else { - $$ = new MatchClause($2, $3, false/*optinal*/); + $$ = new MatchClause($2, $3, false/*optional*/); } } | KW_OPTIONAL KW_MATCH match_path where_clause { @@ -1942,7 +1942,7 @@ text_search_expression } ; - // TODO : unfiy the 
text_search_expression into expression in the future + // TODO : unify the text_search_expression into expression in the future // The current version only support independent text_search_expression for lookup_sentence lookup_where_clause : %empty { $$ = nullptr; } @@ -2066,7 +2066,7 @@ fetch_sentence ; find_path_sentence - : KW_FIND KW_ALL KW_PATH opt_with_properites from_clause to_clause over_clause where_clause find_path_upto_clause { + : KW_FIND KW_ALL KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { auto *s = new FindPathSentence(false, $4, false); s->setFrom($5); s->setTo($6); @@ -2075,7 +2075,7 @@ find_path_sentence s->setStep($9); $$ = s; } - | KW_FIND KW_SHORTEST KW_PATH opt_with_properites from_clause to_clause over_clause where_clause find_path_upto_clause { + | KW_FIND KW_SHORTEST KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { auto *s = new FindPathSentence(true, $4, false); s->setFrom($5); s->setTo($6); @@ -2084,7 +2084,7 @@ find_path_sentence s->setStep($9); $$ = s; } - | KW_FIND KW_NOLOOP KW_PATH opt_with_properites from_clause to_clause over_clause where_clause find_path_upto_clause { + | KW_FIND KW_NOLOOP KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { auto *s = new FindPathSentence(false, $4, true); s->setFrom($5); s->setTo($6); @@ -2095,7 +2095,7 @@ find_path_sentence } ; -opt_with_properites +opt_with_properties : %empty { $$ = false; } | KW_WITH KW_PROP { $$ = true; } ; @@ -2153,7 +2153,7 @@ both_in_out_clause | KW_BOTH over_edges { $$ = new BothInOutClause($2, BoundClause::BOTH); } get_subgraph_sentence - : KW_GET KW_SUBGRAPH opt_with_properites step_clause from_clause in_bound_clause out_bound_clause both_in_out_clause yield_clause { + : KW_GET KW_SUBGRAPH opt_with_properties step_clause from_clause in_bound_clause out_bound_clause both_in_out_clause yield_clause { $$ = new 
GetSubgraphSentence($3, $4, $5, $6, $7, $8, $9); } diff --git a/src/parser/test/ScannerTest.cpp b/src/parser/test/ScannerTest.cpp index d9e7009fdb4..f842233d960 100644 --- a/src/parser/test/ScannerTest.cpp +++ b/src/parser/test/ScannerTest.cpp @@ -107,10 +107,10 @@ TEST(Scanner, Basic) { }; \ GraphScanner lexer; \ lexer.setReadBuffer(input); \ - nebula::GraphParser::semantic_type dumyyylval; \ - nebula::GraphParser::location_type dumyyyloc; \ + nebula::GraphParser::semantic_type dummyyylval; \ + nebula::GraphParser::location_type dummyyyloc; \ try { \ - auto token = lexer.yylex(&dumyyylval, &dumyyyloc); \ + auto token = lexer.yylex(&dummyyylval, &dummyyyloc); \ if (token != 0) { \ return AssertionFailure() << "Lexical error should've " \ << "happened for `" << STR << "'"; \ diff --git a/src/storage/BaseProcessor-inl.h b/src/storage/BaseProcessor-inl.h index af13b402b98..acfb5bc2474 100644 --- a/src/storage/BaseProcessor-inl.h +++ b/src/storage/BaseProcessor-inl.h @@ -166,21 +166,21 @@ StatusOr BaseProcessor::encodeRowVal(const meta::NebulaSchema for (size_t i = 0; i < propNames.size(); i++) { wRet = rowWrite.setValue(propNames[i], props[i]); if (wRet != WriteResult::SUCCEEDED) { - return Status::Error("Add field faild"); + return Status::Error("Add field failed"); } } } else { for (size_t i = 0; i < props.size(); i++) { wRet = rowWrite.setValue(i, props[i]); if (wRet != WriteResult::SUCCEEDED) { - return Status::Error("Add field faild"); + return Status::Error("Add field failed"); } } } wRet = rowWrite.finish(); if (wRet != WriteResult::SUCCEEDED) { - return Status::Error("Add field faild"); + return Status::Error("Add field failed"); } return std::move(rowWrite).moveEncodedStr(); diff --git a/src/storage/StorageFlags.cpp b/src/storage/StorageFlags.cpp index fc50458fc68..2aa3a35f4bc 100644 --- a/src/storage/StorageFlags.cpp +++ b/src/storage/StorageFlags.cpp @@ -40,7 +40,7 @@ DEFINE_string(reader_handlers_type, "cpu", "Type of reader handlers, options: cp 
DEFINE_bool(trace_toss, false, "output verbose log of toss"); -DEFINE_int32(max_edge_returned_per_vertex, INT_MAX, "Max edge number returnred searching vertex"); +DEFINE_int32(max_edge_returned_per_vertex, INT_MAX, "Max edge number returned searching vertex"); DEFINE_bool(query_concurrently, false, diff --git a/src/storage/admin/AdminProcessor.h b/src/storage/admin/AdminProcessor.h index 0d3ea444dc0..bd55ef10955 100644 --- a/src/storage/admin/AdminProcessor.h +++ b/src/storage/admin/AdminProcessor.h @@ -87,7 +87,7 @@ class TransLeaderProcessor : public BaseProcessor { onFinished(); return; } else if (leader != HostAddr("", 0)) { - LOG(INFO) << "I am choosen as leader of space " << spaceId << " part " << partId + LOG(INFO) << "I am chosen as leader of space " << spaceId << " part " << partId << " again!"; pushResultCode(nebula::cpp2::ErrorCode::E_TRANSFER_LEADER_FAILED, partId); onFinished(); diff --git a/src/storage/admin/AdminTask.h b/src/storage/admin/AdminTask.h index d434b1a67d0..bbc99df3f1e 100644 --- a/src/storage/admin/AdminTask.h +++ b/src/storage/admin/AdminTask.h @@ -85,9 +85,9 @@ class AdminTask { virtual int getTaskId() { return ctx_.taskId_; } - virtual void setConcurrentReq(int concurrenctReq) { - if (concurrenctReq > 0) { - ctx_.concurrentReq_ = concurrenctReq; + virtual void setConcurrentReq(int concurrentReq) { + if (concurrentReq > 0) { + ctx_.concurrentReq_ = concurrentReq; } } diff --git a/src/storage/admin/AdminTaskManager.cpp b/src/storage/admin/AdminTaskManager.cpp index bc7f92e735b..5e444888d2e 100644 --- a/src/storage/admin/AdminTaskManager.cpp +++ b/src/storage/admin/AdminTaskManager.cpp @@ -19,7 +19,7 @@ namespace nebula { namespace storage { bool AdminTaskManager::init() { - LOG(INFO) << "max concurrenct subtasks: " << FLAGS_max_concurrent_subtasks; + LOG(INFO) << "max concurrent subtasks: " << FLAGS_max_concurrent_subtasks; auto threadFactory = std::make_shared("TaskManager"); pool_ = std::make_unique(FLAGS_max_concurrent_subtasks, 
threadFactory); bgThread_ = std::make_unique(); diff --git a/src/storage/admin/RebuildIndexTask.cpp b/src/storage/admin/RebuildIndexTask.cpp index c8747374c64..195ee5eaaa0 100644 --- a/src/storage/admin/RebuildIndexTask.cpp +++ b/src/storage/admin/RebuildIndexTask.cpp @@ -19,8 +19,8 @@ RebuildIndexTask::RebuildIndexTask(StorageEnv* env, TaskContext&& ctx) // Rebuild index rate is limited to FLAGS_rebuild_index_part_rate_limit * SubTaskConcurrency. As // for default configuration in a 3 replica cluster, send rate is 512Kb for a partition. From a // global perspective, the leaders are distributed evenly, so both send and recv traffic will be - // 1Mb (512 * 2 peers). Muliplied by the subtasks concurrency, the total send/recv traffic will be - // 10Mb, which is non-trival. + // 1Mb (512 * 2 peers). Multiplied by the subtasks concurrency, the total send/recv traffic will + // be 10Mb, which is non-trivial. LOG(INFO) << "Rebuild index task is rate limited to " << FLAGS_rebuild_index_part_rate_limit << " for each subtask by default"; } @@ -31,8 +31,8 @@ ErrorOr> RebuildIndexTask::ge auto parts = *ctx_.parameters_.parts_ref(); IndexItems items; - if (!ctx_.parameters_.task_specfic_paras_ref().has_value() || - (*ctx_.parameters_.task_specfic_paras_ref()).empty()) { + if (!ctx_.parameters_.task_specific_paras_ref().has_value() || + (*ctx_.parameters_.task_specific_paras_ref()).empty()) { auto itemsRet = getIndexes(space_); if (!itemsRet.ok()) { LOG(ERROR) << "Indexes not found"; @@ -41,7 +41,7 @@ items = std::move(itemsRet).value(); } else { - for (const auto& index : *ctx_.parameters_.task_specfic_paras_ref()) { + for (const auto& index : *ctx_.parameters_.task_specific_paras_ref()) { auto indexID = folly::to(index); auto indexRet = getIndex(space_, indexID); if (!indexRet.ok()) { @@ -78,7 +78,7 @@ nebula::cpp2::ErrorCode RebuildIndexTask::invoke(GraphSpaceID space, PartitionID part, const IndexItems& items) { auto rateLimiter =
std::make_unique(); - // TaskMananger will make sure that there won't be cocurrent invoke of a given part + // TaskManager will make sure that there won't be concurrent invoke of a given part auto result = removeLegacyLogs(space, part); if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Remove legacy logs at part: " << part << " failed"; diff --git a/src/storage/admin/StatsTask.cpp b/src/storage/admin/StatsTask.cpp index cf4d56811a6..beeeaed347b 100644 --- a/src/storage/admin/StatsTask.cpp +++ b/src/storage/admin/StatsTask.cpp @@ -94,7 +94,7 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, } auto partitionNum = partitionNumRet.value(); - LOG(INFO) << "Start statis task"; + LOG(INFO) << "Start stats task"; CHECK_NOTNULL(env_->kvstore_); auto vertexPrefix = NebulaKeyUtils::tagPrefix(part); std::unique_ptr vertexIter; @@ -131,7 +131,7 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, VertexID lastVertexId = ""; - // Only statis valid vetex data, no multi version + // Only stats valid vertex data, no multi version // For example // Vid tagId // 1 1 @@ -161,7 +161,7 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, vertexIter->next(); } - // Only statis valid edge data, no multi version + // Only stats valid edge data, no multi version // For example // src edgetype rank dst // 1 1 1 2 @@ -223,8 +223,8 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, statsItem.set_space_vertices(spaceVertices); statsItem.set_space_edges(spaceEdges); - using Correlativiyties = std::vector; - Correlativiyties positiveCorrelativity; + using Correlativities = std::vector; + Correlativities positiveCorrelativity; for (const auto& entry : positiveRelevancy) { nebula::meta::cpp2::Correlativity partProportion; partProportion.set_part_id(entry.first); partProportion.set_proportion((double)entry.second / spaceVertices); @@ -233,7 +233,7 @@
positiveCorrelativity.emplace_back(std::move(partProportion)); } - Correlativiyties negativeCorrelativity; + Correlativities negativeCorrelativity; for (const auto& entry : negativeRelevancy) { nebula::meta::cpp2::Correlativity partProportion; partProportion.set_part_id(entry.first); @@ -252,13 +252,13 @@ nebula::cpp2::ErrorCode StatsTask::genSubTask(GraphSpaceID spaceId, negativeCorrelativity.end(), [&](const auto& l, const auto& r) { return *l.proportion_ref() < *r.proportion_ref(); }); - std::unordered_map positivePartCorrelativiyties; - positivePartCorrelativiyties[part] = positiveCorrelativity; - statsItem.set_positive_part_correlativity(std::move(positivePartCorrelativiyties)); + std::unordered_map positivePartCorrelativities; + positivePartCorrelativities[part] = positiveCorrelativity; + statsItem.set_positive_part_correlativity(std::move(positivePartCorrelativities)); - std::unordered_map negativePartCorrelativiyties; - negativePartCorrelativiyties[part] = negativeCorrelativity; - statsItem.set_negative_part_correlativity(std::move(negativePartCorrelativiyties)); + std::unordered_map negativePartCorrelativities; + negativePartCorrelativities[part] = negativeCorrelativity; + statsItem.set_negative_part_correlativity(std::move(negativePartCorrelativities)); statistics_.emplace(part, std::move(statsItem)); LOG(INFO) << "Stats task finished"; diff --git a/src/storage/exec/AggregateNode.h b/src/storage/exec/AggregateNode.h index 539deeaea2e..bdf1fcba26e 100644 --- a/src/storage/exec/AggregateNode.h +++ b/src/storage/exec/AggregateNode.h @@ -30,7 +30,7 @@ struct PropStat { // some stat of all valid edges of a vertex. It could be used in ScanVertex or // ScanEdge later. 
The stat is collected during we iterate over edges via // `next`, so if you want to get the final result, be sure to call -// `calculateStat` and then retrieve the reuslt +// `calculateStat` and then retrieve the result template class AggregateNode : public IterateNode { public: diff --git a/src/storage/exec/FilterNode.h b/src/storage/exec/FilterNode.h index 959b66962d6..b70df4d389b 100644 --- a/src/storage/exec/FilterNode.h +++ b/src/storage/exec/FilterNode.h @@ -22,7 +22,7 @@ data, but not both. As for GetNeighbors, it will have filter that involves both tag and edge expression. In that case, FilterNode has a upstream of HashJoinNode, which will -keep popping out edge data. All tage data has been put into ExpressionContext +keep popping out edge data. All tag data has been put into ExpressionContext before FilterNode is doExecuted. By that means, it can check the filter of tag + edge. */ diff --git a/src/storage/exec/HashJoinNode.h b/src/storage/exec/HashJoinNode.h index 3b3dc22ef03..5e6aad1ece1 100644 --- a/src/storage/exec/HashJoinNode.h +++ b/src/storage/exec/HashJoinNode.h @@ -15,7 +15,7 @@ namespace nebula { namespace storage { -// HashJoinNode has input of serveral TagNode and EdgeNode, the EdgeNode is +// HashJoinNode has input of several TagNode and EdgeNode, the EdgeNode is // several SingleEdgeNode of different edge types all edges of a vertex. The // output would be the result of tag, it is a List, each cell save a list of // property values, if tag not found, it will be a empty value. 
Also it will diff --git a/src/storage/exec/IndexScanNode.h b/src/storage/exec/IndexScanNode.h index fe8838aa4f8..ac600d641fb 100644 --- a/src/storage/exec/IndexScanNode.h +++ b/src/storage/exec/IndexScanNode.h @@ -44,16 +44,16 @@ namespace storage { * Member: * `indexId_` : index_ in this Node to access * `partId_` : part to access.It will be modify while `doExecute` - * `index_` : index defination + * `index_` : index definition * `indexNullable_` : if index contain nullable field or not * `columnHints_` : * `path_` : - * `iter_` : current kvstore iterator.It while be reseted `doExecute` and iterated + * `iter_` : current kvstore iterator.It will be reset in `doExecute` and iterated * during `doNext` * `kvstore_` : server kvstore * `requiredColumns_` : row format that `doNext` needs to return * `requiredAndHintColumns_`: columns that `decodeFromBase` needs to decode - * `ttlProps` : ttl properties `needAccesBase_` : if need + * `ttlProps` : ttl properties `needAccessBase_` : if need * `fatalOnBaseNotFound_` : for debug * * Function: @@ -76,13 +76,13 @@ namespace storage { * `Path` not only generate the key to access, but also `qualified` whether the key complies with * the columnhint constraint or not.For example, if there is a truncated string index, we cannot * simply compare bytes to determine whether the current key complies with the columnhints - * constraint, the result of `qulified(bytes)` should be `UNCERTAIN` and `IndexScanNode` will - * access base data then `Path` reconfirm `ColumnHint` constraint by `qulified(RowData)`. In + * constraint, the result of `qualified(bytes)` should be `UNCERTAIN` and `IndexScanNode` will + * access base data then `Path` reconfirm `ColumnHint` constraint by `qualified(RowData)`.
In * addition to the above examples, there are other cases to deal with.`Path` and it's derive class * will dynamic different strategy by `ColumnHint`,`IndexItem`,and `Schema`.All strategy will be * added to `QFList_`(QualifiedFunctionList) during `buildKey`, and executed during `qualified`. * - * `Path` whild be reseted when `IndexScanNode` execute on a new part. + * `Path` will be reset when `IndexScanNode` execute on a new part. * * It should be noted that the range generated by `rangepath` is a certain left included and right * excluded interval,like [startKey_, endKey_), although `ColumnHint` may have many different @@ -112,8 +112,8 @@ namespace storage { * * Function: * `make` : construct `PrefixPath` or `RangePath` according to `hints` - * `qualified(StringPiece)` : qulified key by bytes - * `qualified(Map)` : qulified row by value + * `qualified(StringPiece)` : qualified key by bytes + * `qualified(Map)` : qualified row by value * `resetPart` : reset current partitionID and reset `iter_` * `encodeValue` : encode a Value to bytes * @@ -247,7 +247,7 @@ class QualifiedStrategy { * * Args: * `dedupSuffixLength` : If indexed schema is a tag, `dedupSuffixLength` should be vid.len; - * If the indexed schema is an edge, `dedupSuffixLength` shoule be + * If the indexed schema is an edge, `dedupSuffixLength` should be * srcId.len+sizeof(rank)+dstId.len * Return: * When suffix first appears, the function returns `COMPATIBLE`; otherwise, the function returns @@ -286,7 +286,7 @@ class QualifiedStrategy { * `UNCERTAIN`, and (ab)c > aa is COMPATIBLE. * * Args: - * `LEorGE` : It's an assit arg. true means LE and false means GE. + * `LEorGE` : It's an assist arg. true means LE and false means GE. * `val` : Truncated `String` index value,whose length has been define in `IndexItem`. 
* `keyStartPos` : The position in indexKey where start compare with `val` * diff --git a/src/storage/exec/IndexSelectionNode.h b/src/storage/exec/IndexSelectionNode.h index a134ecf890d..04dd8694638 100644 --- a/src/storage/exec/IndexSelectionNode.h +++ b/src/storage/exec/IndexSelectionNode.h @@ -59,7 +59,7 @@ class IndexSelectionNode : public IndexNode { } Expression *expr_; Map colPos_; - // TODO(hs.zhang): `ExprContext` could be moved out later if we unify the valcano in go/lookup + // TODO(hs.zhang): `ExprContext` could be moved out later if we unify the volcano in go/lookup class ExprContext : public ExpressionContext { public: explicit ExprContext(const Map &colPos) : colPos_(colPos) {} diff --git a/src/storage/exec/IndexVertexScanNode.cpp b/src/storage/exec/IndexVertexScanNode.cpp index aa87a063301..a2a61c42f75 100644 --- a/src/storage/exec/IndexVertexScanNode.cpp +++ b/src/storage/exec/IndexVertexScanNode.cpp @@ -15,9 +15,9 @@ IndexVertexScanNode::IndexVertexScanNode(const IndexVertexScanNode& node) IndexVertexScanNode::IndexVertexScanNode(RuntimeContext* context, IndexID indexId, - const std::vector& clolumnHint, + const std::vector& columnHint, ::nebula::kvstore::KVStore* kvstore) - : IndexScanNode(context, "IndexVertexScanNode", indexId, clolumnHint, kvstore) { + : IndexScanNode(context, "IndexVertexScanNode", indexId, columnHint, kvstore) { getIndex = std::function([this](std::shared_ptr& index) { auto env = this->context_->env(); auto indexMgr = env->indexMan_; diff --git a/src/storage/exec/IndexVertexScanNode.h b/src/storage/exec/IndexVertexScanNode.h index fea56a19adb..48be02e2d1a 100644 --- a/src/storage/exec/IndexVertexScanNode.h +++ b/src/storage/exec/IndexVertexScanNode.h @@ -26,7 +26,7 @@ class IndexVertexScanNode final : public IndexScanNode { IndexVertexScanNode(const IndexVertexScanNode& node); IndexVertexScanNode(RuntimeContext* context, IndexID indexId, - const std::vector& clolumnHint, + const std::vector& columnHint, 
::nebula::kvstore::KVStore* kvstore); ::nebula::cpp2::ErrorCode init(InitContext& ctx) override; std::unique_ptr copy() override; diff --git a/src/storage/exec/StoragePlan.h b/src/storage/exec/StoragePlan.h index 181e18c4c5a..0c48daa0dc8 100644 --- a/src/storage/exec/StoragePlan.h +++ b/src/storage/exec/StoragePlan.h @@ -14,11 +14,11 @@ namespace nebula { namespace storage { /* -Origined from folly::FutureDAG, not thread-safe. +Originated from folly::FutureDAG, not thread-safe. The StoragePlan contains a set of RelNode, all you need to do is define a RelNode, add it to plan by calling addNode, which will return the index of the -RelNode in this plan. The denpendencies between different nodes is defined by +RelNode in this plan. The dependencies between different nodes is defined by calling addDependency in RelNode. To run the plan, call the go method, you could get the final result. diff --git a/src/storage/exec/UpdateNode.h b/src/storage/exec/UpdateNode.h index 21fa7153ee0..0b6e3abe36d 100644 --- a/src/storage/exec/UpdateNode.h +++ b/src/storage/exec/UpdateNode.h @@ -331,7 +331,7 @@ class UpdateTagNode : public UpdateNode { for (auto& e : props_) { auto wRet = rowWriter_->setValue(e.first, e.second); if (wRet != WriteResult::SUCCEEDED) { - LOG(ERROR) << "Add field faild "; + LOG(ERROR) << "Add field failed "; return folly::none; } } @@ -340,7 +340,7 @@ class UpdateTagNode : public UpdateNode { auto wRet = rowWriter_->finish(); if (wRet != WriteResult::SUCCEEDED) { - LOG(ERROR) << "Add field faild "; + LOG(ERROR) << "Add field failed "; return folly::none; } @@ -655,7 +655,7 @@ class UpdateEdgeNode : public UpdateNode { for (auto& e : props_) { auto wRet = rowWriter_->setValue(e.first, e.second); if (wRet != WriteResult::SUCCEEDED) { - VLOG(1) << "Add field faild "; + VLOG(1) << "Add field failed "; return folly::none; } } @@ -664,7 +664,7 @@ class UpdateEdgeNode : public UpdateNode { auto wRet = rowWriter_->finish(); if (wRet != WriteResult::SUCCEEDED) { - VLOG(1) 
<< "Add field faild "; + VLOG(1) << "Add field failed "; return folly::none; } diff --git a/src/storage/query/GetPropProcessor.cpp b/src/storage/query/GetPropProcessor.cpp index 3d57cd72d60..0e48f792278 100644 --- a/src/storage/query/GetPropProcessor.cpp +++ b/src/storage/query/GetPropProcessor.cpp @@ -259,7 +259,7 @@ nebula::cpp2::ErrorCode GetPropProcessor::checkAndBuildContexts(const cpp2::GetP } nebula::cpp2::ErrorCode GetPropProcessor::buildTagContext(const cpp2::GetPropRequest& req) { - // req.vertex_props_ref().has_value() checked in methon checkRequest + // req.vertex_props_ref().has_value() checked in method checkRequest auto returnProps = (*req.vertex_props_ref()).empty() ? buildAllTagProps() : *req.vertex_props_ref(); auto ret = handleVertexProps(returnProps); @@ -272,7 +272,7 @@ nebula::cpp2::ErrorCode GetPropProcessor::buildTagContext(const cpp2::GetPropReq } nebula::cpp2::ErrorCode GetPropProcessor::buildEdgeContext(const cpp2::GetPropRequest& req) { - // req.edge_props_ref().has_value() checked in methon checkRequest + // req.edge_props_ref().has_value() checked in method checkRequest auto returnProps = (*req.edge_props_ref()).empty() ? 
buildAllEdgeProps(cpp2::EdgeDirection::BOTH) : *req.edge_props_ref(); auto ret = handleEdgeProps(returnProps); diff --git a/src/storage/query/QueryBaseProcessor.h b/src/storage/query/QueryBaseProcessor.h index bf1765ebfa1..3f75b0c5335 100644 --- a/src/storage/query/QueryBaseProcessor.h +++ b/src/storage/query/QueryBaseProcessor.h @@ -96,7 +96,7 @@ struct PropContext { struct TagContext { std::vector>> propContexts_; - // indicates whether TagID is in propContxts_ + // indicates whether TagID is in propContexts_ std::unordered_map indexMap_; // tagId -> tagName std::unordered_map tagNames_; @@ -110,7 +110,7 @@ struct TagContext { struct EdgeContext { // propContexts_, indexMap_, edgeNames_ will contain both +/- edges std::vector>> propContexts_; - // indicates whether EdgeType is in propContxts_ + // indicates whether EdgeType is in propContexts_ std::unordered_map indexMap_; // EdgeType -> edgeName std::unordered_map edgeNames_; diff --git a/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp b/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp index 9a8bbc1f3b8..cd42c365932 100644 --- a/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp +++ b/src/storage/test/AddAndUpdateVertexAndEdgeBenchmark.cpp @@ -231,22 +231,22 @@ void setUp(storage::StorageEnv* ev) { // v2 data if (!mockVertexData(ev, parts, spaceVidLen, true)) { - LOG(ERROR) << "Mock data faild"; + LOG(ERROR) << "Mock data failed"; return; } // v1 data if (!mockVertexData(ev, parts, spaceVidLen, false)) { - LOG(ERROR) << "Mock data faild"; + LOG(ERROR) << "Mock data failed"; return; } // v2 data if (!mockEdgeData(ev, parts, spaceVidLen, true)) { - LOG(ERROR) << "Mock data faild"; + LOG(ERROR) << "Mock data failed"; return; } // v1 data if (!mockEdgeData(ev, parts, spaceVidLen, false)) { - LOG(ERROR) << "Mock data faild"; + LOG(ERROR) << "Mock data failed"; return; } } @@ -438,7 +438,7 @@ void insertVertex(int32_t iters) { processor->process(req); auto resp = std::move(f).get(); if 
(!resp.result.failed_parts.empty()) { - LOG(ERROR) << "Add faild"; + LOG(ERROR) << "Add failed"; return; } } @@ -455,7 +455,7 @@ void insertEdge(int32_t iters) { processor->process(req); auto resp = std::move(f).get(); if (!resp.result.failed_parts.empty()) { - LOG(ERROR) << "Add faild"; + LOG(ERROR) << "Add failed"; return; } } @@ -473,7 +473,7 @@ void updateVertex(int32_t iters, bool isVersion2) { processor->process(req); auto resp = std::move(f).get(); if (!resp.result.failed_parts.empty()) { - LOG(ERROR) << "update faild"; + LOG(ERROR) << "update failed"; return; } } @@ -491,7 +491,7 @@ void updateEdge(int32_t iters, bool isVersion2) { auto resp = std::move(f).get(); if (!resp.result.failed_parts.empty()) { - LOG(ERROR) << "update faild"; + LOG(ERROR) << "update failed"; return; } } diff --git a/src/storage/test/AdminTaskManagerTest.cpp b/src/storage/test/AdminTaskManagerTest.cpp index 6864e8a7f09..deebf61c440 100644 --- a/src/storage/test/AdminTaskManagerTest.cpp +++ b/src/storage/test/AdminTaskManagerTest.cpp @@ -753,8 +753,8 @@ TEST(TaskManagerTest, cancel_a_task_while_some_sub_task_running) { folly::Promise task1_p; folly::Future task1_f = task1_p.getFuture(); - folly::Promise cancle_p; - folly::Future cancel = cancle_p.getFuture(); + folly::Promise cancel_p; + folly::Future cancel = cancel_p.getFuture(); folly::Promise subtask_run_p; folly::Future subtask_run_f = subtask_run_p.getFuture(); @@ -785,7 +785,7 @@ TEST(TaskManagerTest, cancel_a_task_while_some_sub_task_running) { LOG(INFO) << "before taskMgr->cancelTask(1);"; taskMgr->cancelTask(jobId); LOG(INFO) << "after taskMgr->cancelTask(1);"; - cancle_p.setValue(0); + cancel_p.setValue(0); task1_f.wait(); diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index f697c3e6be2..ceb3809be17 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -86,7 +86,7 @@ nebula_add_test( nebula_add_test( NAME - statis_task_test + stats_task_test SOURCES 
StatsTaskTest.cpp OBJECTS diff --git a/src/storage/test/CompactionTest.cpp b/src/storage/test/CompactionTest.cpp index 3e6aef4ac0b..7c4f8cf9079 100644 --- a/src/storage/test/CompactionTest.cpp +++ b/src/storage/test/CompactionTest.cpp @@ -21,7 +21,7 @@ namespace nebula { namespace storage { -// statis tag record count, can distinguish multiple versions +// stats tag record count, can distinguish multiple versions void checkTagVertexData(int32_t spaceVidLen, GraphSpaceID spaceId, TagID tagId, @@ -62,7 +62,7 @@ void checkTagVertexData(int32_t spaceVidLen, ASSERT_EQ(expectNum, totalCount); } -// statis edge record count, can distinguish multiple versions +// stats edge record count, can distinguish multiple versions void checkEdgeData(int32_t spaceVidLen, GraphSpaceID spaceId, EdgeType type, @@ -112,7 +112,7 @@ void checkEdgeData(int32_t spaceVidLen, ASSERT_EQ(expectNum, totalCount); } -// statis index record count +// stats index record count void checkIndexData( GraphSpaceID spaceId, IndexID indexId, int parts, StorageEnv* env, int expectNum) { int totalCount = 0; diff --git a/src/storage/test/GetNeighborsBenchmark.cpp b/src/storage/test/GetNeighborsBenchmark.cpp index d18c97224b9..c7a8e96ab55 100644 --- a/src/storage/test/GetNeighborsBenchmark.cpp +++ b/src/storage/test/GetNeighborsBenchmark.cpp @@ -348,7 +348,7 @@ BENCHMARK_RELATIVE(OneVertexFiveProperty, iters) { BENCHMARK_RELATIVE(OneVertexOnePropertyOnlyEdgeNode, iters) { goEdgeNode(iters, {"Tim Duncan"}, {"name"}, {"teamName"}); } -BENCHMARK_RELATIVE(OneVertexOneProperyOnlyKV, iters) { +BENCHMARK_RELATIVE(OneVertexOnePropertyOnlyKV, iters) { prefix(iters, {"Tim Duncan"}, {"name"}, {"teamName"}); } @@ -456,7 +456,7 @@ BENCHMARK_RELATIVE(TenVertexOnePropertyOnlyEdgeNode, iters) { {"name"}, {"teamName"}); } -BENCHMARK_RELATIVE(TenVertexOneProperyOnlyKV, iters) { +BENCHMARK_RELATIVE(TenVertexOnePropertyOnlyKV, iters) { prefix(iters, {"Tim Duncan", "Kobe Bryant", @@ -504,7 +504,7 @@ OneVertexOnlyId 119.10% 374.96us 
2.67K OneVertexThreeProperty 59.50% 750.53us 1.33K OneVertexFiveProperty 46.25% 965.55us 1.04K OneVertexOnePropertyOnlyEdgeNode 113.92% 392.01us 2.55K -OneVertexOneProperyOnlyKV 113.08% 394.93us 2.53K +OneVertexOnePropertyOnlyKV 113.08% 394.93us 2.53K ---------------------------------------------------------------------------- NoFilter 444.84us 2.25K OneFilterNonePass 106.16% 419.01us 2.39K @@ -521,6 +521,6 @@ TenVertexOnlyId 119.68% 3.68ms 271.76 TenVertexThreeProperty 59.25% 7.43ms 134.53 TenVertexFiveProperty 45.67% 9.64ms 103.69 TenVertexOnePropertyOnlyEdgeNode 109.45% 4.02ms 248.53 -TenVertexOneProperyOnlyKV 109.23% 4.03ms 248.03 +TenVertexOnePropertyOnlyKV 109.23% 4.03ms 248.03 ============================================================================ */ diff --git a/src/storage/test/IndexWithTTLTest.cpp b/src/storage/test/IndexWithTTLTest.cpp index 88c16d49737..a42a2392fcc 100644 --- a/src/storage/test/IndexWithTTLTest.cpp +++ b/src/storage/test/IndexWithTTLTest.cpp @@ -425,7 +425,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTL) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"2021002"}); + parameter.set_task_specific_paras({"2021002"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); @@ -494,7 +494,7 @@ TEST(IndexWithTTLTest, RebuildEdgeIndexWithTTL) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"2021002"}); + parameter.set_task_specific_paras({"2021002"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); @@ -565,7 +565,7 @@ TEST(IndexWithTTLTest, RebuildTagIndexWithTTLExpired) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"2021002"}); + parameter.set_task_specific_paras({"2021002"}); cpp2::AddAdminTaskRequest 
request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); @@ -636,7 +636,7 @@ TEST(IndexWithTTLTest, RebuildEdgeIndexWithTTLExpired) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"2021002"}); + parameter.set_task_specific_paras({"2021002"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); diff --git a/src/storage/test/KVClientTest.cpp b/src/storage/test/KVClientTest.cpp index fc8b2e86441..7249997686f 100644 --- a/src/storage/test/KVClientTest.cpp +++ b/src/storage/test/KVClientTest.cpp @@ -33,7 +33,7 @@ void checkResult(StorageRpcResponse& resp, size_t TEST(KVClientTest, SimpleTest) { GraphSpaceID spaceId = 1; fs::TempDir metaPath("/tmp/KVTest.meta.XXXXXX"); - fs::TempDir stoagePath("/tmp/KVTest.stoage.XXXXXX"); + fs::TempDir storagePath("/tmp/KVTest.storage.XXXXXX"); mock::MockCluster cluster; std::string storageName{"127.0.0.1"}; auto storagePort = network::NetworkUtils::getAvailablePort(); @@ -44,7 +44,7 @@ TEST(KVClientTest, SimpleTest) { options.localHost_ = storageAddr; options.role_ = meta::cpp2::HostRole::STORAGE; cluster.initMetaClient(options); - cluster.startStorage(storageAddr, stoagePath.path(), true); + cluster.startStorage(storageAddr, storagePath.path(), true); auto client = cluster.initGeneralStorageClient(); // kv interface test diff --git a/src/storage/test/LookupIndexTest.cpp b/src/storage/test/LookupIndexTest.cpp index 4f96001b67f..2e750da42c2 100644 --- a/src/storage/test/LookupIndexTest.cpp +++ b/src/storage/test/LookupIndexTest.cpp @@ -2104,7 +2104,7 @@ TEST_P(LookupIndexTest, NullablePropertyTest) { req.set_parts({1, 2, 3, 4, 5, 6}); req.set_return_columns({kVid}); - // bool range scan will be forbiden in query engine, so only test preix for + // bool range scan will be forbidden in query engine, so only test prefix for // bool { LOG(INFO) << "lookup on tag where tag.col_bool == true"; diff --git 
a/src/storage/test/QueryTestUtils.h b/src/storage/test/QueryTestUtils.h index 5b9d5d103c3..05b2371e69d 100644 --- a/src/storage/test/QueryTestUtils.h +++ b/src/storage/test/QueryTestUtils.h @@ -779,7 +779,7 @@ class QueryTestUtils { if (cols.size() < 2) { LOG(FATAL) << "Invalid column name"; } - // cols[1] is the tagName, which can be transfromed to entryId + // cols[1] is the tagName, which can be transformed to entryId auto entryId = folly::to(cols[1]); auto props = findExpectProps(entryId, tags, edges); switch (entryId) { @@ -877,7 +877,7 @@ class QueryTestUtils { return teammate.player1_ == player2 && teammate.player2_ == player1; }); if (iter == mock::MockData::teammates_.end()) { - LOG(FATAL) << "Can't find speicied teammate"; + LOG(FATAL) << "Can't find specified teammate"; } return *iter; } diff --git a/src/storage/test/RebuildIndexTest.cpp b/src/storage/test/RebuildIndexTest.cpp index ec598d52849..4dc34c715ec 100644 --- a/src/storage/test/RebuildIndexTest.cpp +++ b/src/storage/test/RebuildIndexTest.cpp @@ -75,7 +75,7 @@ TEST_F(RebuildIndexTest, RebuildTagIndexCheckALLData) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"4", "5"}); + parameter.set_task_specific_paras({"4", "5"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); @@ -162,7 +162,7 @@ TEST_F(RebuildIndexTest, RebuildEdgeIndexCheckALLData) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(parts); - parameter.set_task_specfic_paras({"103", "104"}); + parameter.set_task_specific_paras({"103", "104"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); @@ -259,7 +259,7 @@ TEST_F(RebuildIndexTest, RebuildTagIndexWithDelete) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(std::move(parts)); - parameter.set_task_specfic_paras({"4", "5"}); + 
parameter.set_task_specific_paras({"4", "5"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); @@ -320,7 +320,7 @@ TEST_F(RebuildIndexTest, RebuildTagIndexWithAppend) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(std::move(parts)); - parameter.set_task_specfic_paras({"4", "5"}); + parameter.set_task_specific_paras({"4", "5"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); @@ -370,7 +370,7 @@ TEST_F(RebuildIndexTest, RebuildTagIndex) { request.set_cmd(meta::cpp2::AdminCmd::REBUILD_TAG_INDEX); request.set_job_id(++gJobId); request.set_task_id(13); - parameter.set_task_specfic_paras({"4", "5"}); + parameter.set_task_specific_paras({"4", "5"}); request.set_para(std::move(parameter)); auto callback = [](nebula::cpp2::ErrorCode, nebula::meta::cpp2::StatsItem&) {}; @@ -420,7 +420,7 @@ TEST_F(RebuildIndexTest, RebuildEdgeIndexWithDelete) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(std::move(parts)); - parameter.set_task_specfic_paras({"103", "104"}); + parameter.set_task_specific_paras({"103", "104"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); @@ -482,7 +482,7 @@ TEST_F(RebuildIndexTest, RebuildEdgeIndexWithAppend) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(std::move(parts)); - parameter.set_task_specfic_paras({"103", "104"}); + parameter.set_task_specific_paras({"103", "104"}); cpp2::AddAdminTaskRequest request; request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); @@ -526,7 +526,7 @@ TEST_F(RebuildIndexTest, RebuildEdgeIndex) { parameter.set_space_id(1); std::vector parts = {1, 2, 3, 4, 5, 6}; parameter.set_parts(std::move(parts)); - parameter.set_task_specfic_paras({"103", "104"}); + parameter.set_task_specific_paras({"103", "104"}); cpp2::AddAdminTaskRequest request; 
request.set_cmd(meta::cpp2::AdminCmd::REBUILD_EDGE_INDEX); diff --git a/src/storage/test/ScanEdgePropBenchmark.cpp b/src/storage/test/ScanEdgePropBenchmark.cpp index cbb26fa2106..92bb6b01cd7 100644 --- a/src/storage/test/ScanEdgePropBenchmark.cpp +++ b/src/storage/test/ScanEdgePropBenchmark.cpp @@ -195,7 +195,7 @@ TEST_P(ScanEdgePropBench, ProcessEdgeProps) { ASSERT_TRUE(code.ok()); result.mutableList().values.emplace_back(std::move(list)); } - LOG(WARNING) << "ProcessEdgeProps reader reset with vector schmeas: process " << edgeRowCount + LOG(WARNING) << "ProcessEdgeProps reader reset with vector schemas: process " << edgeRowCount << " edges takes " << watch.elapsed().count() << " us."; } { @@ -245,7 +245,7 @@ TEST_P(ScanEdgePropBench, ProcessEdgeProps) { result.mutableList().values.emplace_back(std::move(list)); } LOG(WARNING) << "ProcessEdgeProps only RowReaderV2 reset with vector " - "schmeas: process " + "schemas: process " << edgeRowCount << " edges takes " << watch.elapsed().count() << " us."; } } diff --git a/src/storage/test/StatsTaskTest.cpp b/src/storage/test/StatsTaskTest.cpp index f267df6eb82..dc8746e7ee3 100644 --- a/src/storage/test/StatsTaskTest.cpp +++ b/src/storage/test/StatsTaskTest.cpp @@ -103,7 +103,7 @@ TEST_F(StatsTaskTest, StatsTagAndEdgeData) { } } - // Check statis result + // Check stats result ASSERT_EQ(nebula::meta::cpp2::JobStatus::FINISHED, statsItem.get_status()); // Three tags ASSERT_EQ(3, (*statsItem.tag_vertices_ref()).size()); @@ -168,7 +168,7 @@ TEST_F(StatsTaskTest, StatsTagAndEdgeData) { } } - // Check statis result + // Check stats result ASSERT_EQ(nebula::meta::cpp2::JobStatus::FINISHED, statsItem.get_status()); // Three tags ASSERT_EQ(3, (*statsItem.tag_vertices_ref()).size()); @@ -239,7 +239,7 @@ TEST_F(StatsTaskTest, StatsTagAndEdgeData) { } } - // Check statis result + // Check stats result ASSERT_EQ(nebula::meta::cpp2::JobStatus::FINISHED, statsItem.get_status()); // Three tags ASSERT_EQ(3, 
(*statsItem.tag_vertices_ref()).size()); diff --git a/src/storage/test/StorageHttpAdminHandlerTest.cpp b/src/storage/test/StorageHttpAdminHandlerTest.cpp index e210774925c..2a7a3e098b4 100644 --- a/src/storage/test/StorageHttpAdminHandlerTest.cpp +++ b/src/storage/test/StorageHttpAdminHandlerTest.cpp @@ -65,14 +65,14 @@ static void checkInvalidRequest(const std::string& url, const std::string& errMs ASSERT_EQ(0, request(url).find(errMsg)); } -TEST(StoragehHttpAdminHandlerTest, TestInvalidRequests) { +TEST(StorageHttpAdminHandlerTest, TestInvalidRequests) { checkInvalidRequest("/admin", "Space should not be empty"); checkInvalidRequest("/admin?space=xx", "Op should not be empty"); checkInvalidRequest("/admin?space=xx&op=yy", "Can't find space xx"); checkInvalidRequest("/admin?space=1&op=yy", "Unknown operation yy"); } -TEST(StoragehHttpAdminHandlerTest, TestSupportedOperations) { +TEST(StorageHttpAdminHandlerTest, TestSupportedOperations) { ASSERT_EQ("ok", request("/admin?space=1&op=flush")); ASSERT_EQ("ok", request("/admin?space=1&op=compact")); } diff --git a/src/storage/test/StorageHttpStatsHandlerTest.cpp b/src/storage/test/StorageHttpStatsHandlerTest.cpp index 55ad976f56c..286950ee48d 100644 --- a/src/storage/test/StorageHttpStatsHandlerTest.cpp +++ b/src/storage/test/StorageHttpStatsHandlerTest.cpp @@ -70,7 +70,7 @@ TEST(StorageHttpStatsHandlerTest, GetStatsTest) { const std::string expect = "rocksdb.bytes.read=0\n"; ASSERT_STREQ(expect.c_str(), resp.value().c_str()); } - // Get multipple stats + // Get multiple stats { auto url = "/rocksdb_stats?stats=rocksdb.bytes.read,rocksdb.block.cache.add"; auto request = diff --git a/src/storage/test/StorageIndexWriteBenchmark.cpp b/src/storage/test/StorageIndexWriteBenchmark.cpp index dde6bdadd00..b76fe4edd6b 100644 --- a/src/storage/test/StorageIndexWriteBenchmark.cpp +++ b/src/storage/test/StorageIndexWriteBenchmark.cpp @@ -31,7 +31,7 @@ using NewTag = nebula::storage::cpp2::NewTag; enum class IndexENV : uint8_t { 
NO_INDEX = 1, ONE_INDEX = 2, - MULITPLE_INDEX = 3, + MULTIPLE_INDEX = 3, INVALID_INDEX = 4, }; @@ -97,7 +97,7 @@ std::unique_ptr memIndexMan(IndexENV type) { indexMan->addTagIndex(spaceId, -1, indexId, mockTagIndexColumns()); break; } - case IndexENV::MULITPLE_INDEX: { + case IndexENV::MULTIPLE_INDEX: { indexMan->addTagIndex(spaceId, tagId, indexId, mockTagIndexColumns()); indexMan->addTagIndex(spaceId, tagId, indexId + 1, mockTagIndexColumns()); indexMan->addTagIndex(spaceId, tagId, indexId + 2, mockTagIndexColumns()); @@ -315,7 +315,7 @@ void insertVerticesMultIndex() { int32_t vId = 0; BENCHMARK_SUSPEND { std::string dataPath = folly::stringPrintf("%s/%s", FLAGS_root_data_path.c_str(), "multIndex"); - initEnv(IndexENV::MULITPLE_INDEX, dataPath, env, kv, sm, im); + initEnv(IndexENV::MULTIPLE_INDEX, dataPath, env, kv, sm, im); }; while (vId < FLAGS_total_vertices_size) { @@ -361,7 +361,7 @@ int main(int argc, char** argv) { * withoutIndex: Without index, insert data only. * unmatchIndex: There are no matched indexes. * attachIndex: One index, the index contains all the columns of tag. - * duplicateVerticesIndex: One index, and insert deplicate vertices. + * duplicateVerticesIndex: One index, and insert duplicate vertices. * multipleIndex: Three indexes by one tag. 
* * 56 processors, Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz diff --git a/src/storage/test/UpdateEdgeTest.cpp b/src/storage/test/UpdateEdgeTest.cpp index 8222081eed2..86d66825c69 100644 --- a/src/storage/test/UpdateEdgeTest.cpp +++ b/src/storage/test/UpdateEdgeTest.cpp @@ -377,7 +377,7 @@ TEST(UpdateEdgeTest, Filter_Yield_Test) { auto* srcExp2 = EdgePropertyExpression::make(pool, "101", "endYear"); auto* priExp2 = ConstantExpression::make(pool, 2017L); auto* right = RelationalExpression::makeEQ(pool, srcExp2, priExp2); - // left AND right is ture + // left AND right is true auto logExp = LogicalExpression::makeAnd(pool, left, right); req.set_condition(Expression::encode(*logExp)); @@ -748,7 +748,7 @@ TEST(UpdateEdgeTest, Invalid_Filter_Test) { auto* srcExp2 = EdgePropertyExpression::make(pool, "101", "birth"); auto* priExp2 = ConstantExpression::make(pool, 1990L); auto* right = RelationalExpression::makeEQ(pool, srcExp2, priExp2); - // left AND right is ture + // left AND right is true auto logExp = LogicalExpression::makeAnd(pool, left, right); req.set_condition(Expression::encode(*logExp)); @@ -1542,7 +1542,7 @@ TEST(UpdateEdgeTest, Yield_Key_Test) { EXPECT_EQ("trade", val.getStr()); } -// Update faild, yield edge is illegal +// Update failed, yield edge is illegal TEST(UpdateEdgeTest, Yield_Illegal_Key_Test) { fs::TempDir rootPath("/tmp/UpdateEdgeTest.XXXXXX"); mock::MockCluster cluster; @@ -1649,7 +1649,7 @@ TEST(UpdateEdgeTest, Yield_Illegal_Key_Test) { EXPECT_EQ("zzzzz", val.getStr()); } -// Upsert, insert faild +// Upsert, insert failed // teamCareer filed has not default value and not nullable, not in set clause TEST(UpdateEdgeTest, Insertable_No_Default_Test) { fs::TempDir rootPath("/tmp/UpdateEdgeTest.XXXXXX"); diff --git a/src/storage/test/UpdateVertexTest.cpp b/src/storage/test/UpdateVertexTest.cpp index 1b6558ca593..35dceb7d91e 100644 --- a/src/storage/test/UpdateVertexTest.cpp +++ b/src/storage/test/UpdateVertexTest.cpp @@ -234,7 +234,7 @@ 
TEST(UpdateVertexTest, Filter_Yield_Test2) { auto* srcExp2 = SourcePropertyExpression::make(pool, "1", "endYear"); auto* priExp2 = ConstantExpression::make(pool, 2017L); auto* right = RelationalExpression::makeEQ(pool, srcExp2, priExp2); - // left AND right is ture + // left AND right is true auto logExp = LogicalExpression::makeAnd(pool, left, right); req.set_condition(Expression::encode(*logExp)); @@ -537,7 +537,7 @@ TEST(UpdateVertexTest, Invalid_Filter_Test) { auto* srcExp2 = SourcePropertyExpression::make(pool, "1", "birth"); auto* priExp2 = ConstantExpression::make(pool, 1990L); auto* right = RelationalExpression::makeEQ(pool, srcExp2, priExp2); - // left AND right is ture + // left AND right is true auto logExp = LogicalExpression::makeAnd(pool, left, right); req.set_condition(Expression::encode(*logExp)); @@ -1072,7 +1072,7 @@ TEST(UpdateVertexTest, TTL_Insert_Test) { EXPECT_EQ("Tim Duncan", (*resp.props_ref()).rows[0].values[4].getStr()); EXPECT_EQ(1, (*resp.props_ref()).rows[0].values[5].getInt()); - // Get player from kvstore directly, ttl expired data can be readed + // Get player from kvstore directly, ttl expired data can be read // First record is inserted record data // Second record is expired ttl data auto prefix = NebulaKeyUtils::tagPrefix(spaceVidLen, partId, vertexId, tagId); @@ -1112,9 +1112,9 @@ TEST(UpdateVertexTest, TTL_Insert_Test) { EXPECT_EQ(1, count); } -// upsert, insert faild +// upsert, insert failed // age filed has not default value and not nullable, not in set clause -TEST(UpdateVertexTest, Insertable_No_Defalut_Test) { +TEST(UpdateVertexTest, Insertable_No_Default_Test) { fs::TempDir rootPath("/tmp/UpdateVertexTest.XXXXXX"); mock::MockCluster cluster; cluster.initStorageKV(rootPath.path()); diff --git a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp b/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp index 8587d5021b8..182484a4de0 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp +++
b/src/storage/transaction/ChainAddEdgesProcessorLocal.cpp @@ -468,10 +468,10 @@ std::string ChainAddEdgesProcessorLocal::makeReadableEdge(const cpp2::AddEdgesRe * * storage will insert datetime() as default value on both * in/out edge, but they will calculate independent - * which lead to inconsistance + * which lead to inconsistency * - * that's why we need to replace the inconsistance prone value - * at the monment the request comes + * that's why we need to replace the inconsistency prone value + * at the moment the request comes * */ void ChainAddEdgesProcessorLocal::replaceNullWithDefaultValue(cpp2::AddEdgesRequest& req) { auto& edgesOfPart = *req.parts_ref(); diff --git a/src/storage/transaction/ChainAddEdgesProcessorLocal.h b/src/storage/transaction/ChainAddEdgesProcessorLocal.h index 9f4962ba048..8e2329158a2 100644 --- a/src/storage/transaction/ChainAddEdgesProcessorLocal.h +++ b/src/storage/transaction/ChainAddEdgesProcessorLocal.h @@ -86,7 +86,7 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, /** * @brief a normal AddEdgeRequest may contain multi edges * even though they will fail or succeed as a batch in this time - * some of them may by overwrite by othere request + * some of them may be overwritten by other request * so when resume each edge */ cpp2::AddEdgesRequest makeSingleEdgeRequest(PartitionID partId, const cpp2::NewEdge& edge); @@ -114,10 +114,10 @@ class ChainAddEdgesProcessorLocal : public BaseProcessor, * * storage will insert datetime() as default value on both * in/out edge, but they will calculate independent - * which lead to inconsistance + * which lead to inconsistency * - * that why we need to replace the inconsistance prone value - * at the monment the request comes + * that's why we need to replace the inconsistency prone value + * at the moment the request comes * */ void replaceNullWithDefaultValue(cpp2::AddEdgesRequest& req); diff --git a/src/storage/transaction/ConsistUtil.cpp b/src/storage/transaction/ConsistUtil.cpp
index 05854f8a949..4edea1dfe0e 100644 --- a/src/storage/transaction/ConsistUtil.cpp +++ b/src/storage/transaction/ConsistUtil.cpp @@ -62,7 +62,7 @@ RequestType ConsistUtil::parseType(folly::StringPiece val) { case 'a': return RequestType::INSERT; default: - LOG(FATAL) << "shoule not happend, identifier is " << identifier; + LOG(FATAL) << "should not happen, identifier is " << identifier; } } diff --git a/src/storage/transaction/ResumeUpdateProcessor.h b/src/storage/transaction/ResumeUpdateProcessor.h index d0ff1d7766b..ea6272e43ef 100644 --- a/src/storage/transaction/ResumeUpdateProcessor.h +++ b/src/storage/transaction/ResumeUpdateProcessor.h @@ -12,7 +12,7 @@ namespace storage { /** * @brief - * if the TxnManager backgroud resume thread found a prime key + * if the TxnManager background resume thread found a prime key * it will create this processor to resume the complete update process */ class ResumeUpdateProcessor : public ChainUpdateEdgeProcessorLocal { diff --git a/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp b/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp index b2501cc920f..aca10ccad0c 100644 --- a/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp +++ b/src/storage/transaction/ResumeUpdateRemoteProcessor.cpp @@ -45,7 +45,7 @@ folly::SemiFuture ResumeUpdateRemoteProcessor::processLocal(Code code) { forwardToDelegateProcessor(); return code; } else { - // we can't decide if the double prime shoule be deleted. + // we can't decide if the double prime should be deleted. 
// so do nothing } diff --git a/src/storage/transaction/ResumeUpdateRemoteProcessor.h b/src/storage/transaction/ResumeUpdateRemoteProcessor.h index dbd9b780796..d1ce5d93438 100644 --- a/src/storage/transaction/ResumeUpdateRemoteProcessor.h +++ b/src/storage/transaction/ResumeUpdateRemoteProcessor.h @@ -12,7 +12,7 @@ namespace storage { /** * @brief - * if the TxnManager backgroud resume thread found a prime key + * if the TxnManager background resume thread found a prime key * it will create this processor to resume the complete update process */ class ResumeUpdateRemoteProcessor : public ChainUpdateEdgeProcessorLocal { diff --git a/src/tools/db-dump/DbDumpTool.cpp b/src/tools/db-dump/DbDumpTool.cpp index f412b758299..20bf447bd61 100644 --- a/src/tools/db-dump/DbDumpTool.cpp +++ b/src/tools/db-dump/DbDumpTool.cpp @@ -21,28 +21,28 @@ void printHelp() { Default: ./ --meta_server= - A list of meta severs' ip:port seperated by comma. + A list of meta servers' ip:port separated by comma. Default: 127.0.0.1:45500 --mode= scan | stat scan: print to screen when records meet the condition, and also print statistics to screen in final. stat: print statistics to screen. - Defualt: scan + Default: scan --vids= - A list of vid seperated by comma. This parameter means vertex_id/edge_src_id + A list of vid separated by comma. This parameter means vertex_id/edge_src_id Would scan the whole space's records if it is not given. --parts= - A list of partition id seperated by comma. - Would output all patitions if it is not given. + A list of partition id separated by comma. + Would output all partitions if it is not given. --tags= - A list of tag name seperated by comma. + A list of tag name separated by comma. --edges= - A list of edge name seperated by comma. + A list of edge name separated by comma. --limit= A positive number that limits the output.
diff --git a/src/tools/db-dump/DbDumper.cpp b/src/tools/db-dump/DbDumper.cpp index 8be44a822e9..2f0fd52b15a 100644 --- a/src/tools/db-dump/DbDumper.cpp +++ b/src/tools/db-dump/DbDumper.cpp @@ -13,10 +13,10 @@ DEFINE_string(space_name, "", "The space name."); DEFINE_string(db_path, "./", "Path to rocksdb."); DEFINE_string(meta_server, "127.0.0.1:45500", "Meta servers' address."); DEFINE_string(mode, "scan", "Dump mode, scan | stat"); -DEFINE_string(parts, "", "A list of partition id seperated by comma."); -DEFINE_string(vids, "", "A list of vertex ids seperated by comma."); -DEFINE_string(tags, "", "A list of tag name seperated by comma."); -DEFINE_string(edges, "", "A list of edge name seperated by comma."); +DEFINE_string(parts, "", "A list of partition id separated by comma."); +DEFINE_string(vids, "", "A list of vertex ids separated by comma."); +DEFINE_string(tags, "", "A list of tag name separated by comma."); +DEFINE_string(edges, "", "A list of edge name separated by comma."); DEFINE_int64(limit, 1000, "Limit to output."); namespace nebula { @@ -110,7 +110,7 @@ Status DbDumper::initParams() { folly::splitTo(',', FLAGS_tags, std::inserter(tags, tags.begin()), true); folly::splitTo(',', FLAGS_edges, std::inserter(edges, edges.begin()), true); } catch (const std::exception& e) { - return Status::Error("Parse parts/vetexIds/tags/edges error: %s", e.what()); + return Status::Error("Parse parts/vertexIds/tags/edges error: %s", e.what()); } for (auto& tagName : tags) { @@ -129,7 +129,7 @@ Status DbDumper::initParams() { } if (FLAGS_mode.compare("scan") != 0 && FLAGS_mode.compare("stat") != 0) { - return Status::Error("Unkown mode '%s'.", FLAGS_mode.c_str()); + return Status::Error("Unknown mode '%s'.", FLAGS_mode.c_str()); } return Status::OK(); } diff --git a/src/tools/db-upgrade/DbUpgrader.cpp b/src/tools/db-upgrade/DbUpgrader.cpp index 76b8019fece..40c638a6354 100644 --- a/src/tools/db-upgrade/DbUpgrader.cpp +++ b/src/tools/db-upgrade/DbUpgrader.cpp @@ -372,7 
+372,7 @@ void UpgraderSpace::doProcessV1() { // Parallel process part auto partConcurrency = std::min(static_cast(FLAGS_max_concurrent_parts), parts_.size()); - LOG(INFO) << "Max concurrenct parts: " << partConcurrency; + LOG(INFO) << "Max concurrent parts: " << partConcurrency; unFinishedPart_ = parts_.size(); @@ -581,7 +581,7 @@ void UpgraderSpace::doProcessV2() { // Parallel process part auto partConcurrency = std::min(static_cast(FLAGS_max_concurrent_parts), parts_.size()); - LOG(INFO) << "Max concurrenct parts: " << partConcurrency; + LOG(INFO) << "Max concurrent parts: " << partConcurrency; unFinishedPart_ = parts_.size(); LOG(INFO) << "Start to handle vertex/edge/index of parts data in space id " << spaceId_; @@ -1073,7 +1073,7 @@ void DbUpgrader::run() { // Parallel process space auto spaceConcurrency = std::min(static_cast(FLAGS_max_concurrent_spaces), upgraderSpaces.size()); - LOG(INFO) << "Max concurrenct spaces: " << spaceConcurrency; + LOG(INFO) << "Max concurrent spaces: " << spaceConcurrency; for (size_t i = 0; i < spaceConcurrency; ++i) { pool_->add(std::bind(&DbUpgrader::doSpace, this)); diff --git a/src/tools/db-upgrade/DbUpgrader.h b/src/tools/db-upgrade/DbUpgrader.h index a90f181b875..6851893f739 100644 --- a/src/tools/db-upgrade/DbUpgrader.h +++ b/src/tools/db-upgrade/DbUpgrader.h @@ -112,7 +112,7 @@ class UpgraderSpace { void runPartV2(); public: - // Souce data path + // Source data path std::string srcPath_; // Destination data path std::string dstPath_; @@ -192,7 +192,7 @@ class DbUpgrader { meta::MetaClient* metaClient_; meta::ServerBasedSchemaManager* schemaMan_; meta::IndexManager* indexMan_; - // Souce data path + // Source data path std::string srcPath_; // Destination data path diff --git a/src/tools/db-upgrade/DbUpgraderTool.cpp b/src/tools/db-upgrade/DbUpgraderTool.cpp index f7702a9cd5b..94b35c51d22 100644 --- a/src/tools/db-upgrade/DbUpgraderTool.cpp +++ b/src/tools/db-upgrade/DbUpgraderTool.cpp @@ -36,7 +36,7 @@ void printHelp() 
{ src_db_path and dst_db_path must be different. --upgrade_meta_server= - A list of meta severs' ip:port seperated by comma. + A list of meta servers' ip:port separated by comma. Default: 127.0.0.1:45500 --upgrade_version=<1|2> @@ -172,7 +172,7 @@ int main(int argc, char* argv[]) { LOG(INFO) << "Prepare phase end"; // Upgrade data - LOG(INFO) << "Upgrade phase bengin"; + LOG(INFO) << "Upgrade phase begin"; // The data path in storage conf is generally one, not too many. // So there is no need to control the number of threads here. diff --git a/src/tools/db-upgrade/NebulaKeyUtilsV1.h b/src/tools/db-upgrade/NebulaKeyUtilsV1.h index 9f14073d2ba..3226f34b626 100644 --- a/src/tools/db-upgrade/NebulaKeyUtilsV1.h +++ b/src/tools/db-upgrade/NebulaKeyUtilsV1.h @@ -181,7 +181,7 @@ class NebulaKeyUtilsV1 final { } static folly::StringPiece keyWithNoVersion(const folly::StringPiece& rawKey) { - // TODO(heng) We should change the method if varint data version supportted. + // TODO(heng) We should change the method if varint data version supported. return rawKey.subpiece(0, rawKey.size() - sizeof(int64_t)); } diff --git a/src/tools/db-upgrade/NebulaKeyUtilsV2.h b/src/tools/db-upgrade/NebulaKeyUtilsV2.h index 533e448ac0f..ad35cb5ae26 100644 --- a/src/tools/db-upgrade/NebulaKeyUtilsV2.h +++ b/src/tools/db-upgrade/NebulaKeyUtilsV2.h @@ -31,7 +31,7 @@ enum class NebulaKeyTypeV2 : uint32_t { * space property. * * LockKeyUtils: - * EdgeKeyWithNoVersion + placeHolder(8) + version(8) + surfix(2) + * EdgeKeyWithNoVersion + placeHolder(8) + version(8) + suffix(2) * */ const std::string kLockSuffix = "lk"; // NOLINT @@ -224,7 +224,7 @@ class NebulaKeyUtilsV2 final { } static folly::StringPiece keyWithNoVersion(const folly::StringPiece& rawKey) { - // TODO(heng) We should change the method if varint data version supportted. + // TODO(heng) We should change the method if varint data version supported.
return rawKey.subpiece(0, rawKey.size() - sizeof(int64_t)); } @@ -244,14 +244,14 @@ class NebulaKeyUtilsV2 final { static EdgeVersion getLockVersion(const folly::StringPiece& rawKey) { // TODO(liuyu) We should change the method if varint data version - // supportted. + // supported. auto offset = rawKey.size() - sizeof(int64_t) * 2 - kLockSuffix.size(); return readInt(rawKey.data() + offset, sizeof(int64_t)); } static folly::StringPiece lockWithNoVersion(const folly::StringPiece& rawKey) { // TODO(liuyu) We should change the method if varint data version - // supportted. + // supported. return rawKey.subpiece(0, rawKey.size() - sizeof(int64_t) * 2 - kLockSuffix.size()); } diff --git a/src/tools/storage-perf/StorageIntegrityTool.cpp b/src/tools/storage-perf/StorageIntegrityTool.cpp index 287136ee31b..e67c96579cc 100644 --- a/src/tools/storage-perf/StorageIntegrityTool.cpp +++ b/src/tools/storage-perf/StorageIntegrityTool.cpp @@ -13,7 +13,7 @@ DEFINE_string(meta_server_addrs, "", "meta server address"); DEFINE_int32(io_threads, 10, "client io threads"); -DEFINE_int32(partition_num, 1024, "partititon for space"); +DEFINE_int32(partition_num, 1024, "partition for space"); DEFINE_string(space_name, "test_space", "the space name"); DEFINE_string(tag_name, "test_tag", "the tag name"); DEFINE_string(prop_name, "test_prop", "the property name"); @@ -33,7 +33,7 @@ namespace storage { * * There are some gflags we need to pay attention: * 1. The space's replica must be 1, because we don't have retry in - * StorageClient, we will update it after we suppport preheat. The tag must have + * StorageClient, we will update it after we support preheat. The tag must have * only one int property, which is prop_name. * 2. 
If the space and tag doesn't exists, it will try to create one, maybe you * need to set heartbeat_interval_secs to make sure the storage service has load diff --git a/src/tools/storage-perf/StoragePerfTool.cpp b/src/tools/storage-perf/StoragePerfTool.cpp index df8e3a59662..b7336097245 100644 --- a/src/tools/storage-perf/StoragePerfTool.cpp +++ b/src/tools/storage-perf/StoragePerfTool.cpp @@ -313,7 +313,7 @@ class Perf { if (!resps.succeeded()) { LOG(ERROR) << "Request failed!"; } else { - VLOG(3) << "request successed!"; + VLOG(3) << "request succeeded!"; } this->finishedRequests_++; auto now = time::WallClock::fastNowInMicroSec(); @@ -342,7 +342,7 @@ class Perf { << apache::thrift::util::enumNameSafe(entry.second); } } else { - VLOG(1) << "request successed!"; + VLOG(1) << "request succeeded!"; } this->finishedRequests_++; auto now = time::WallClock::fastNowInMicroSec(); @@ -368,7 +368,7 @@ class Perf { if (!resps.succeeded()) { LOG(ERROR) << "Request failed!"; } else { - VLOG(3) << "request successed!"; + VLOG(3) << "request succeeded!"; } this->finishedRequests_++; auto now = time::WallClock::fastNowInMicroSec(); @@ -401,7 +401,7 @@ class Perf { if (!resps.succeeded()) { LOG(ERROR) << "Request failed!"; } else { - VLOG(3) << "request successed!"; + VLOG(3) << "request succeeded!"; } this->finishedRequests_++; auto now = time::WallClock::fastNowInMicroSec(); @@ -426,7 +426,7 @@ class Perf { if (!resps.succeeded()) { LOG(ERROR) << "Request failed!"; } else { - VLOG(3) << "request successed!"; + VLOG(3) << "request succeeded!"; } this->finishedRequests_++; auto now = time::WallClock::fastNowInMicroSec(); diff --git a/tests/admin/test_configs.py b/tests/admin/test_configs.py index a6a7213b3ed..2a8fe58736f 100644 --- a/tests/admin/test_configs.py +++ b/tests/admin/test_configs.py @@ -60,7 +60,7 @@ def test_configs(self): expected_result = [ ['GRAPH', 'v', 'int', 'MUTABLE', v], ['GRAPH', 'minloglevel', 'int', 'MUTABLE', 0], - ['GRAPH', 'slow_op_threshhold_ms', 'int', 
'MUTABLE', 100], + ['GRAPH', 'slow_op_threshold_ms', 'int', 'MUTABLE', 100], ['GRAPH', 'heartbeat_interval_secs', 'int', 'MUTABLE', 1], ['GRAPH', 'meta_client_retry_times', 'int', 'MUTABLE', 3], ['GRAPH', 'accept_partial_success', 'bool', 'MUTABLE', False], @@ -80,7 +80,7 @@ def test_configs(self): ['STORAGE', 'wal_ttl', 'int', 'MUTABLE', 14400], ['STORAGE', 'minloglevel', 'int', 'MUTABLE', 0], ['STORAGE', 'custom_filter_interval_secs', 'int', 'MUTABLE', 86400], - ['STORAGE', 'slow_op_threshhold_ms', 'int', 'MUTABLE', 100], + ['STORAGE', 'slow_op_threshold_ms', 'int', 'MUTABLE', 100], ['STORAGE', 'heartbeat_interval_secs', 'int', 'MUTABLE', 1], ['STORAGE', 'meta_client_retry_times', 'int', 'MUTABLE', 3], ['STORAGE', 'rocksdb_db_options', 'map', 'MUTABLE', {}], @@ -118,7 +118,7 @@ def test_configs(self): ''') self.check_resp_succeeded(resp) - @pytest.mark.skip("The change of minloglevel will infulence the whole test.") + @pytest.mark.skip("The change of minloglevel will influence the whole test.") def test_update_configs(self): # set and get a config of all module resp = self.client.execute('UPDATE CONFIGS minloglevel={}'.format(2)) diff --git a/tests/admin/test_listener.py b/tests/admin/test_listener.py index 3fe4f95ade8..f59e0f8879b 100644 --- a/tests/admin/test_listener.py +++ b/tests/admin/test_listener.py @@ -22,7 +22,7 @@ def test_listener(self): resp = self.client.execute('ADD LISTENER ELASTICSEARCH {}:{}'.format(storage_ip, storage_port)) self.check_resp_failed(resp) - # Add non-existen host + # Add nonexistent host resp = self.client.execute('ADD LISTENER ELASTICSEARCH 127.0.0.1:8899') self.check_resp_succeeded(resp) diff --git a/tests/bench/data_generate.py b/tests/bench/data_generate.py index 55ca5577c7c..55ac70e9ee6 100644 --- a/tests/bench/data_generate.py +++ b/tests/bench/data_generate.py @@ -7,7 +7,7 @@ import string -def insert_vertexs(client, ns, batchCount, batchSize): +def insert_vertices(client, ns, batchCount, batchSize): resp = 
client.execute('USE ' + ns) client.check_resp_succeeded(resp) for i in range(batchCount): diff --git a/tests/bench/delete.py b/tests/bench/delete.py index 8a93efe6af7..42e129e6dcc 100644 --- a/tests/bench/delete.py +++ b/tests/bench/delete.py @@ -2,7 +2,7 @@ import pytest from graph import ttypes from tests.common.nebula_test_suite import NebulaTestSuite -from tests.bench.data_generate import insert_vertexs, insert_edges +from tests.bench.data_generate import insert_vertices, insert_edges class TestDeleteBench(NebulaTestSuite): @@ -29,7 +29,7 @@ def prepare(self): self.execute('CREATE EDGE IF NOT EXISTS like(likeness int)') self.check_resp_succeeded(resp) time.sleep(4) - insert_vertexs(self, "benchdeletespace", 50, 20000) + insert_vertices(self, "benchdeletespace", 50, 20000) insert_edges(self, "benchdeletespace", 50, 20000) @classmethod diff --git a/tests/bench/lookup.py b/tests/bench/lookup.py index fc6f766641e..81eeaa5c011 100644 --- a/tests/bench/lookup.py +++ b/tests/bench/lookup.py @@ -2,7 +2,7 @@ import pytest from graph import ttypes from tests.common.nebula_test_suite import NebulaTestSuite -from tests.bench.data_generate import insert_vertexs, insert_edges +from tests.bench.data_generate import insert_vertices, insert_edges class TestLookupBench(NebulaTestSuite): @@ -26,7 +26,7 @@ def prepare(self): 'CREATE TAG index IF NOT EXISTS personAge on person(age)') self.check_resp_succeeded(resp) time.sleep(4) - insert_vertexs(self, "benchlookupspace", 50, 20000) + insert_vertices(self, "benchlookupspace", 50, 20000) self.execute('CREATE EDGE IF NOT EXISTS like(likeness int)') self.check_resp_succeeded(resp) time.sleep(4) diff --git a/tests/common/plan_differ.py b/tests/common/plan_differ.py index f9d8800cc3d..132cbb6a809 100644 --- a/tests/common/plan_differ.py +++ b/tests/common/plan_differ.py @@ -182,16 +182,16 @@ def _extract_dict_from_obj(self, obj) -> dict: def _validate_expect(self, rows, column_names): # Check expected plan column if self.ID not in 
column_names: - self._err_msg = "Plan node id column is missing in expectde plan" + self._err_msg = "Plan node id column is missing in expected plan" return False if self.NAME not in column_names: - self._err_msg = "Plan node name column is missing in expectde plan" + self._err_msg = "Plan node name column is missing in expected plan" return False if self.DEPENDS not in column_names: - self._err_msg = "Plan node dependencies column is missing in expectde plan" + self._err_msg = "Plan node dependencies column is missing in expected plan" return False if self.OP_INFO not in column_names: - self._err_msg = "Plan node operator info column is missing in expectde plan" + self._err_msg = "Plan node operator info column is missing in expected plan" return False id_idx_dict = {} @@ -199,7 +199,7 @@ def _validate_expect(self, rows, column_names): for i in range(len(rows)): node_id = rows[i][0] if not node_id: - self._err_msg = "Plan node id is missing in expectde plan" + self._err_msg = "Plan node id is missing in expected plan" return False id_idx_dict[int(node_id)] = i diff --git a/tests/common/utils.py b/tests/common/utils.py index 8bff391612c..6867835015f 100644 --- a/tests/common/utils.py +++ b/tests/common/utils.py @@ -109,7 +109,7 @@ def compare_value(real, expect): esrc, edst = eedge.src, eedge.dst if eedge.type < 0: esrc, edst = edst, esrc - # ignore props comparation + # ignore props comparison return rsrc == esrc and rdst == edst \ and redge.ranking == eedge.ranking \ and redge.name == eedge.name diff --git a/tests/data/nba.ngql b/tests/data/nba.ngql index a1e4402fcab..4b84145723e 100644 --- a/tests/data/nba.ngql +++ b/tests/data/nba.ngql @@ -75,7 +75,7 @@ VALUES "Dirk Nowitzki": ("Dirk Nowitzki", 40), "Paul George": ("Paul George", 28), "Grant Hill": ("Grant Hill", 46), - "Shaquile O'Neal": ("Shaquile O'Neal", 47), + "Shaquille O'Neal": ("Shaquille O'Neal", 47), "JaVale McGee": ("JaVale McGee", 31), "Dwight Howard": ("Dwight Howard", 33); @@ -256,12 +256,12 @@ 
VALUES "Grant Hill" -> "Magic": (2000, 2007), "Grant Hill" -> "Suns": (2007, 2012), "Grant Hill" -> "Clippers": (2012, 2013), - "Shaquile O'Neal" -> "Magic": (1992, 1996), - "Shaquile O'Neal" -> "Lakers": (1996, 2004), - "Shaquile O'Neal" -> "Heat": (2004, 2008), - "Shaquile O'Neal" -> "Suns": (2008, 2009), - "Shaquile O'Neal" -> "Cavaliers": (2009, 2010), - "Shaquile O'Neal" -> "Celtics": (2010, 2011), + "Shaquille O'Neal" -> "Magic": (1992, 1996), + "Shaquille O'Neal" -> "Lakers": (1996, 2004), + "Shaquille O'Neal" -> "Heat": (2004, 2008), + "Shaquille O'Neal" -> "Suns": (2008, 2009), + "Shaquille O'Neal" -> "Cavaliers": (2009, 2010), + "Shaquille O'Neal" -> "Celtics": (2010, 2011), "JaVale McGee" -> "Wizards": (2008, 2012), "JaVale McGee" -> "Nuggets": (2012, 2015), "JaVale McGee" -> "Mavericks": (2015, 2016), @@ -331,7 +331,7 @@ VALUES "Joel Embiid" -> "Ben Simmons": (80), "Damian Lillard" -> "LaMarcus Aldridge": (80), "Yao Ming" -> "Tracy McGrady": (90), - "Yao Ming" -> "Shaquile O'Neal": (90), + "Yao Ming" -> "Shaquille O'Neal": (90), "Dejounte Murray" -> "Tim Duncan": (99), "Dejounte Murray" -> "Tony Parker": (99), "Dejounte Murray" -> "Manu Ginobili": (99), @@ -357,8 +357,8 @@ VALUES "Dirk Nowitzki" -> "Dwyane Wade": (10), "Paul George" -> "Russell Westbrook": (95), "Grant Hill" -> "Tracy McGrady": (90), - "Shaquile O'Neal" -> "JaVale McGee": (100), - "Shaquile O'Neal" -> "Tim Duncan": (80); + "Shaquille O'Neal" -> "JaVale McGee": (100), + "Shaquille O'Neal" -> "Tim Duncan": (80); INSERT EDGE teammate(start_year, end_year) diff --git a/tests/data/nba/like.csv b/tests/data/nba/like.csv index 176b72ccf30..a3108927a89 100644 --- a/tests/data/nba/like.csv +++ b/tests/data/nba/like.csv @@ -53,7 +53,7 @@ Dwyane Wade,Carmelo Anthony,90 Joel Embiid,Ben Simmons,80 Damian Lillard,LaMarcus Aldridge,80 Yao Ming,Tracy McGrady,90 -Yao Ming,Shaquile O'Neal,90 +Yao Ming,Shaquille O'Neal,90 Dejounte Murray,Tim Duncan,99 Dejounte Murray,Tony Parker,99 Dejounte Murray,Manu 
Ginobili,99 @@ -79,5 +79,5 @@ Dirk Nowitzki,Jason Kidd,80 Dirk Nowitzki,Dwyane Wade,10 Paul George,Russell Westbrook,95 Grant Hill,Tracy McGrady,90 -Shaquile O'Neal,JaVale McGee,100 -Shaquile O'Neal,Tim Duncan,80 +Shaquille O'Neal,JaVale McGee,100 +Shaquille O'Neal,Tim Duncan,80 diff --git a/tests/data/nba/player.csv b/tests/data/nba/player.csv index ba00e2d993b..55d720a2e91 100644 --- a/tests/data/nba/player.csv +++ b/tests/data/nba/player.csv @@ -48,6 +48,6 @@ Jason Kidd,Jason Kidd,45 Dirk Nowitzki,Dirk Nowitzki,40 Paul George,Paul George,28 Grant Hill,Grant Hill,46 -Shaquile O'Neal,Shaquile O'Neal,47 +Shaquille O'Neal,Shaquille O'Neal,47 JaVale McGee,JaVale McGee,31 Dwight Howard,Dwight Howard,33 diff --git a/tests/data/nba/serve.csv b/tests/data/nba/serve.csv index 0dc4308105d..3a1e8a31a36 100644 --- a/tests/data/nba/serve.csv +++ b/tests/data/nba/serve.csv @@ -134,12 +134,12 @@ Grant Hill,Pistons,0,1994,2000 Grant Hill,Magic,0,2000,2007 Grant Hill,Suns,0,2007,2012 Grant Hill,Clippers,0,2012,2013 -Shaquile O'Neal,Magic,0,1992,1996 -Shaquile O'Neal,Lakers,0,1996,2004 -Shaquile O'Neal,Heat,0,2004,2008 -Shaquile O'Neal,Suns,0,2008,2009 -Shaquile O'Neal,Cavaliers,0,2009,2010 -Shaquile O'Neal,Celtics,0,2010,2011 +Shaquille O'Neal,Magic,0,1992,1996 +Shaquille O'Neal,Lakers,0,1996,2004 +Shaquille O'Neal,Heat,0,2004,2008 +Shaquille O'Neal,Suns,0,2008,2009 +Shaquille O'Neal,Cavaliers,0,2009,2010 +Shaquille O'Neal,Celtics,0,2010,2011 JaVale McGee,Wizards,0,2008,2012 JaVale McGee,Nuggets,0,2012,2015 JaVale McGee,Mavericks,0,2015,2016 diff --git a/tests/job/test_session.py b/tests/job/test_session.py index 644536e5b46..19b602a23c6 100644 --- a/tests/job/test_session.py +++ b/tests/job/test_session.py @@ -152,7 +152,7 @@ def get_connection(ip, port): resp = conn1.execute(session_id, 'CREATE SPACE IF NOT EXISTS aSpace(partition_num=1, vid_type=FIXED_STRING(8));USE aSpace;') self.check_resp_succeeded(ResultSet(resp, 0)) - # time::WallClock::fastNowInMicroSec() is not 
syncronous in different process, + # time::WallClock::fastNowInMicroSec() is not synchronous in different process, # so we sleep 3 seconds here and charge session time.sleep(3) resp = conn1.execute(session_id, 'USE aSpace;') diff --git a/tests/query/stateless/test_update.py b/tests/query/stateless/test_update.py index d7d6bfd7c3c..77e0f37dcfc 100644 --- a/tests/query/stateless/test_update.py +++ b/tests/query/stateless/test_update.py @@ -128,7 +128,7 @@ def test_upsert_vertex(self): self.check_resp_succeeded(resp) self.check_out_of_order_result(resp.rows, expect_result) - # success: oder update, use default value from start + # success: order update, use default value from start cmd = 'UPSERT VERTEX 202 SET person.name = "bb", person.age = $^.person.start + 8, ' \ 'person.start = 10;' resp = self.execute(cmd) @@ -142,7 +142,7 @@ def test_upsert_vertex(self): self.check_resp_succeeded(resp) self.check_out_of_order_result(resp.rows, expect_result) - # success: oder update, use the update value from start + # success: order update, use the update value from start cmd = 'UPSERT VERTEX 202 SET person.name = "bb", person.start = 10, ' \ 'person.age = $^.person.start + 8;' resp = self.execute(cmd) @@ -249,7 +249,7 @@ def test_upsert_edge(self): self.check_resp_succeeded(resp) self.check_out_of_order_result(resp.rows, expect_result) - # success: oder update, use default value from start + # success: order update, use default value from start cmd = 'UPSERT EDGE 204->205 OF study SET name = "bb", start = study.end - 1000, ' \ 'end = 60000;' resp = self.execute(cmd) @@ -263,7 +263,7 @@ def test_upsert_edge(self): self.check_resp_succeeded(resp) self.check_out_of_order_result(resp.rows, expect_result) - # success: oder update, use the update value from start + # success: order update, use the update value from start cmd = 'UPSERT EDGE 206->207 OF study SET end = 60000, start = study.end - 1000' resp = self.execute(cmd) self.check_resp_succeeded(resp) diff --git 
a/tests/tck/features/bugfix/MatchUsedInPipe.feature b/tests/tck/features/bugfix/MatchUsedInPipe.feature index fcc317eb646..f7d2277f8b2 100644 --- a/tests/tck/features/bugfix/MatchUsedInPipe.feature +++ b/tests/tck/features/bugfix/MatchUsedInPipe.feature @@ -25,7 +25,7 @@ Feature: Test match used in pipe | ("Tim Duncan") | ("Manu Ginobili") | | ("Tim Duncan") | ("Manu Ginobili") | | ("Tim Duncan") | ("Marco Belinelli") | - | ("Tim Duncan") | ("Shaquile O'Neal") | + | ("Tim Duncan") | ("Shaquille O'Neal") | | ("Tim Duncan") | ("Spurs") | | ("Tim Duncan") | ("Tiago Splitter") | | ("Tim Duncan") | ("Tony Parker") | @@ -41,7 +41,7 @@ Feature: Test match used in pipe Then the result should be, in any order, with relax comparison: | $-.n | $-.m | count(*) | | ("Tim Duncan") | ("Spurs") | 1 | - | ("Tim Duncan") | ("Shaquile O'Neal") | 1 | + | ("Tim Duncan") | ("Shaquille O'Neal") | 1 | | ("Tim Duncan") | ("Tiago Splitter") | 1 | | ("Tim Duncan") | ("Marco Belinelli") | 1 | | ("Tim Duncan") | ("Dejounte Murray") | 1 | diff --git a/tests/tck/features/bugfix/SubgraphBeforePipe.feature b/tests/tck/features/bugfix/SubgraphBeforePipe.feature index a4fa4381bb2..c0b66f3419e 100644 --- a/tests/tck/features/bugfix/SubgraphBeforePipe.feature +++ b/tests/tck/features/bugfix/SubgraphBeforePipe.feature @@ -23,7 +23,7 @@ Feature: Test get subgraph before pipe | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | @@ -53,7 +53,7 @@ Feature: Test get subgraph before pipe | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | + | [:like "Shaquille 
O\'Neal"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | @@ -89,12 +89,12 @@ Feature: Test get subgraph before pipe | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Danny Green"->"Spurs"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | @@ -107,14 +107,14 @@ Feature: Test get subgraph before pipe | | | [:serve "Boris Diaw"->"Jazz"@0] | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | | [:serve "Boris Diaw"->"Suns"@0] | - | | | [:like "Yao Ming"->"Shaquile O\'Neal"@0] | - | | | [:like "Shaquile O\'Neal"->"JaVale McGee"@0] | - | | | [:serve 
"Shaquile O\'Neal"->"Cavaliers"@0] | - | | | [:serve "Shaquile O\'Neal"->"Celtics"@0] | - | | | [:serve "Shaquile O\'Neal"->"Heat"@0] | - | | | [:serve "Shaquile O\'Neal"->"Lakers"@0] | - | | | [:serve "Shaquile O\'Neal"->"Magic"@0] | - | | | [:serve "Shaquile O\'Neal"->"Suns"@0] | + | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | + | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | + | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | + | | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | + | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | + | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | + | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | + | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | | [:like "Marco Belinelli"->"Tony Parker"@0] | @@ -199,7 +199,7 @@ Feature: Test get subgraph before pipe | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | @@ -229,7 +229,7 @@ Feature: Test get subgraph before pipe | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | @@ -254,12 +254,12 @@ Feature: Test get subgraph before pipe | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | | [:like "Boris 
Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Danny Green"->"Spurs"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | @@ -272,14 +272,14 @@ Feature: Test get subgraph before pipe | | | [:serve "Boris Diaw"->"Jazz"@0] | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | | [:serve "Boris Diaw"->"Suns"@0] | - | | | [:like "Yao Ming"->"Shaquile O\'Neal"@0] | - | | | [:like "Shaquile O\'Neal"->"JaVale McGee"@0] | - | | | [:serve "Shaquile O\'Neal"->"Cavaliers"@0] | - | | | [:serve "Shaquile O\'Neal"->"Celtics"@0] | - | | | [:serve "Shaquile O\'Neal"->"Heat"@0] | - | | | [:serve "Shaquile O\'Neal"->"Lakers"@0] | - | | | [:serve "Shaquile O\'Neal"->"Magic"@0] | - | | | [:serve "Shaquile O\'Neal"->"Suns"@0] | + | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | + | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | + | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | + 
| | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | + | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | + | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | + | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | + | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | | [:like "Marco Belinelli"->"Tony Parker"@0] | diff --git a/tests/tck/features/delete/DeleteVertex.IntVid.feature b/tests/tck/features/delete/DeleteVertex.IntVid.feature index 89f4c6e7753..8843ddede3c 100644 --- a/tests/tck/features/delete/DeleteVertex.IntVid.feature +++ b/tests/tck/features/delete/DeleteVertex.IntVid.feature @@ -196,7 +196,7 @@ Feature: Delete int vid of vertex Then the result should be, in any order: | player.name | player.age | - Scenario: delete int vertex by pipe successed + Scenario: delete int vertex by pipe succeeded Given load "nba_int_vid" csv data to a new space # test delete with pipe wrong vid type When executing query: diff --git a/tests/tck/features/expression/Case.feature b/tests/tck/features/expression/Case.feature index f03f42fbb9d..5bbcddb56aa 100644 --- a/tests/tck/features/expression/Case.feature +++ b/tests/tck/features/expression/Case.feature @@ -153,9 +153,9 @@ Feature: Case Expression RETURN v.name, v.age """ Then the result should be, in any order: - | v.name | v.age | - | "Shaquile O'Neal" | 47 | - | "Grant Hill" | 46 | + | v.name | v.age | + | "Shaquille O'Neal" | 47 | + | "Grant Hill" | 46 | When executing query: """ MATCH (v:player) @@ -163,11 +163,11 @@ Feature: Case Expression RETURN CASE WHEN v.age > 46 THEN v.name WHEN v.age > 45 THEN v.age ELSE "nothing" END AS r """ Then the result should be, in any order: - | r | - | "nothing" | - | 46 | - | "Shaquile O'Neal" | - | "nothing" | + | r | + | "nothing" | + | 46 | + | "Shaquille O'Neal" | + | "nothing" | Scenario: mixed use of generic case and conditional case When executing query: diff --git 
a/tests/tck/features/expression/FunctionCall.feature b/tests/tck/features/expression/FunctionCall.feature index 5c5a6a00a46..228a65496e9 100644 --- a/tests/tck/features/expression/FunctionCall.feature +++ b/tests/tck/features/expression/FunctionCall.feature @@ -63,8 +63,8 @@ Feature: Function Call Expression RETURN concat(a.name,c.name) """ Then the result should be, in any order: - | concat(a.name,c.name) | - | "Shaquile O'NealLakers" | + | concat(a.name,c.name) | + | "Shaquille O'NealLakers" | When executing query: """ MATCH (a:player)-[b:serve]-(c:team{name: "Lakers"}) @@ -72,8 +72,8 @@ Feature: Function Call Expression RETURN concat(a.name, "hello") """ Then the result should be, in any order: - | concat(a.name,"hello") | - | "Shaquile O'Nealhello" | + | concat(a.name,"hello") | + | "Shaquille O'Nealhello" | Scenario: concat_ws When executing query: @@ -91,8 +91,8 @@ Feature: Function Call Expression RETURN concat_ws("@",a.name, "hello", b.likeness, c.name) as result """ Then the result should be, in any order: - | result | - | "Shaquile O'Neal@hello@Lakers" | + | result | + | "Shaquille O'Neal@hello@Lakers" | When executing query: """ MATCH (a:player)-[b:serve]-(c:team{name: "Lakers"}) @@ -100,8 +100,8 @@ Feature: Function Call Expression RETURN concat_ws("@",a.name, NULL, "hello", b.likeness, c.name) as result """ Then the result should be, in any order: - | result | - | "Shaquile O'Neal@hello@Lakers" | + | result | + | "Shaquille O'Neal@hello@Lakers" | When executing query: """ MATCH (a:player)-[b:serve]-(c:team{name: "Lakers"}) diff --git a/tests/tck/features/expression/Predicate.feature b/tests/tck/features/expression/Predicate.feature index f728c945530..f290f406605 100644 --- a/tests/tck/features/expression/Predicate.feature +++ b/tests/tck/features/expression/Predicate.feature @@ -119,7 +119,7 @@ Feature: Predicate | ("LeBron James" :player{age: 34, name: "LeBron James"}) | | ("Rajon Rondo" :player{age: 33, name: "Rajon Rondo"}) | | ("Tiago Splitter" 
:player{age: 34, name: "Tiago Splitter"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Boris Diaw" :player{age: 36, name: "Boris Diaw"}) | | ("Aron Baynes" :player{age: 32, name: "Aron Baynes"}) | | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | @@ -140,7 +140,7 @@ Feature: Predicate MATCH(n:player) WHERE EXISTS("abc") RETURN n.name AS name ORDER BY name LIMIT 10 """ - Then a SyntaxError should be raised at runtime: The exists only accept LabelAttribe, Attribute and Subscript + Then a SyntaxError should be raised at runtime: The exists only accept LabelAttribute, Attribute and Subscript Then drop the used space Scenario: use a exists with null properties diff --git a/tests/tck/features/expression/RelationalExpr.feature b/tests/tck/features/expression/RelationalExpr.feature index f1c0f64e33a..2215e01c414 100644 --- a/tests/tck/features/expression/RelationalExpr.feature +++ b/tests/tck/features/expression/RelationalExpr.feature @@ -153,7 +153,7 @@ Feature: RelationalExpression | "Rajon Rondo" | 33 | | "Ray Allen" | 43 | | "Rudy Gay" | 32 | - | "Shaquile O'Neal" | 47 | + | "Shaquille O'Neal" | 47 | | "Steve Nash" | 45 | | "Tiago Splitter" | 34 | | "Tim Duncan" | 42 | @@ -196,7 +196,7 @@ Feature: RelationalExpression | "Rajon Rondo" | 33 | | "Ray Allen" | 43 | | "Rudy Gay" | 32 | - | "Shaquile O'Neal" | 47 | + | "Shaquille O'Neal" | 47 | | "Steve Nash" | 45 | | "Tiago Splitter" | 34 | | "Tim Duncan" | 42 | @@ -221,11 +221,11 @@ Feature: RelationalExpression MATCH (v:player) WHERE v.age - 5 >= 40 RETURN v """ Then the result should be, in any order: - | v | - | ("Jason Kidd" :player{age: 45, name: "Jason Kidd"}) | - | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | - | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | + | v | + | ("Jason Kidd" :player{age: 45, name: "Jason Kidd"}) | + | 
("Grant Hill" :player{age: 46, name: "Grant Hill"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | + | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | And the execution plan should be: | id | name | dependencies | operator info | | 9 | Project | 8 | | diff --git a/tests/tck/features/expression/UnaryExpr.feature b/tests/tck/features/expression/UnaryExpr.feature index 8ef2ad970bf..591e127e652 100644 --- a/tests/tck/features/expression/UnaryExpr.feature +++ b/tests/tck/features/expression/UnaryExpr.feature @@ -60,7 +60,7 @@ Feature: UnaryExpression | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Ray Allen" :player{age: 43, name: "Ray Allen"}) | | ("Boris Diaw" :player{age: 36, name: "Boris Diaw"}) | | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | @@ -93,7 +93,7 @@ Feature: UnaryExpression | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | And the execution plan should be: | id | name | dependencies | operator info | | 9 | Project | 8 | | diff --git a/tests/tck/features/fetch/FetchVertices.intVid.feature b/tests/tck/features/fetch/FetchVertices.intVid.feature index 8857a319102..f1470ff4cde 100644 --- a/tests/tck/features/fetch/FetchVertices.intVid.feature +++ b/tests/tck/features/fetch/FetchVertices.intVid.feature @@ -330,7 +330,7 @@ Feature: Fetch Int Vid Vertices GO FROM hash('Boris Diaw') over like YIELD 
like._dst as id, like._dst as id | FETCH PROP ON player $-.id YIELD player.name, player.age """ Then a SemanticError should be raised at runtime: - # only constant list or single colume of data is allowed in piped FETCH clause + # only constant list or single column of data is allowed in piped FETCH clause When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._src as src, like._dst as dst | FETCH PROP ON player $-.src, $-.dst YIELD vertex as node; @@ -411,8 +411,8 @@ Feature: Fetch Int Vid Vertices | "Tim Duncan" | "Tim Duncan" | {age: 42, name: "Tim Duncan"} | When executing query: """ - FETCH PROP ON * hash('Tim Duncan') YIELD id(vertex), keys(vertex) as keys, tags(vertex) as tagss, properties(vertex) as props + FETCH PROP ON * hash('Tim Duncan') YIELD id(vertex), keys(vertex) as keys, tags(vertex) as tags_, properties(vertex) as props """ Then the result should be, in any order, and the columns 0 should be hashed: - | id(VERTEX) | keys | tagss | props | + | id(VERTEX) | keys | tags_ | props | | "Tim Duncan" | ["age", "name", "speciality"] | ["bachelor", "player"] | {age: 42, name: "Tim Duncan", speciality: "psychology"} | diff --git a/tests/tck/features/fetch/FetchVertices.strVid.feature b/tests/tck/features/fetch/FetchVertices.strVid.feature index ed23bf04603..1710050fa56 100644 --- a/tests/tck/features/fetch/FetchVertices.strVid.feature +++ b/tests/tck/features/fetch/FetchVertices.strVid.feature @@ -272,7 +272,7 @@ Feature: Fetch String Vertices | "Tony Parker" | 36 | EMPTY | EMPTY | EMPTY | | "Tim Duncan" | 42 | EMPTY | "Tim Duncan" | "psychology" | - Scenario: fetch from varibles + Scenario: fetch from variables When executing query: """ $var = GO FROM 'Boris Diaw' over like YIELD like._dst as id; FETCH PROP ON player $var.id YIELD player.name, player.age @@ -331,11 +331,11 @@ Feature: Fetch String Vertices FETCH PROP ON player "Tim Duncan", "Yao Ming" YIELD vertex as node | go from id($-.node) over like yield like._dst """ Then the result 
should be, in any order: - | like._dst | - | "Shaquile O'Neal" | - | "Tracy McGrady" | - | "Manu Ginobili" | - | "Tony Parker" | + | like._dst | + | "Shaquille O'Neal" | + | "Tracy McGrady" | + | "Manu Ginobili" | + | "Tony Parker" | When executing query: """ FETCH PROP ON player "Tim Duncan" yield player.name as id | go from $-.id over like yield like._dst @@ -349,11 +349,11 @@ Feature: Fetch String Vertices $var = FETCH PROP ON player "Tim Duncan", "Yao Ming"; go from id($var.vertices_) over like yield like._dst """ Then the result should be, in any order: - | like._dst | - | "Manu Ginobili" | - | "Tony Parker" | - | "Shaquile O'Neal" | - | "Tracy McGrady" | + | like._dst | + | "Manu Ginobili" | + | "Tony Parker" | + | "Shaquille O'Neal" | + | "Tracy McGrady" | When executing query: """ FETCH PROP ON player 'Tony Parker' YIELD player.name as Name | @@ -441,7 +441,7 @@ Feature: Fetch String Vertices FETCH PROP ON * "Tim Duncan", "Boris Diaw" YIELD player.not_exist_prop """ Then a SemanticError should be raised at runtime: - # only constant list or single colume of data is allowed in piped FETCH clause + # only constant list or single column of data is allowed in piped FETCH clause When executing query: """ GO FROM 'Boris Diaw' over like YIELD like._src as src, like._dst as dst | FETCH PROP ON player $-.src, $-.dst; @@ -522,8 +522,8 @@ Feature: Fetch String Vertices | "Tim Duncan" | "Tim Duncan" | {age: 42, name: "Tim Duncan"} | When executing query: """ - FETCH PROP ON * 'Tim Duncan' YIELD id(vertex), keys(vertex) as keys, tags(vertex) as tagss, properties(vertex) as props + FETCH PROP ON * 'Tim Duncan' YIELD id(vertex), keys(vertex) as keys, tags(vertex) as tags_, properties(vertex) as props """ Then the result should be, in any order: - | id(VERTEX) | keys | tagss | props | + | id(VERTEX) | keys | tags_ | props | | "Tim Duncan" | ["age", "name", "speciality"] | ["bachelor", "player"] | {age: 42, name: "Tim Duncan", speciality: "psychology"} | diff --git 
a/tests/tck/features/geo/GeoBase.feature b/tests/tck/features/geo/GeoBase.feature index 0e0a33335bd..24983b3e051 100644 --- a/tests/tck/features/geo/GeoBase.feature +++ b/tests/tck/features/geo/GeoBase.feature @@ -81,12 +81,12 @@ Feature: Geo base """ CREATE EDGE test_2(geo geography DEFAULT ST_GeogFromText("LINESTRING(0 1, 2xxxx")); """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! When executing query: """ CREATE TAG test_3(geo geography(point) DEFAULT ST_GeogFromText("LineString(0 1, 2 3)")); """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! When executing query: """ CREATE TAG test_3(geo geography(linestring) DEFAULT ST_GeogFromText("LineString(0 1, 2 3)")); @@ -349,7 +349,7 @@ Feature: Geo base INSERT EDGE any_shape_edge(geo) VALUES "108"->"408":(ST_GeogFromText("POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1.0 1.0, 2.0 2.0, 0.0 2.0, 1.0 1.0))")); """ Then the execution should be successful - # Lookup on geo index agagin + # Lookup on geo index again When executing query: """ LOOKUP ON any_shape YIELD ST_ASText(any_shape.geo); diff --git a/tests/tck/features/go/GO.IntVid.feature b/tests/tck/features/go/GO.IntVid.feature index 8eeecc95b18..2dd3b6d20f3 100644 --- a/tests/tck/features/go/GO.IntVid.feature +++ b/tests/tck/features/go/GO.IntVid.feature @@ -376,7 +376,7 @@ Feature: IntegerVid Go Sentence | EMPTY | 90 | When executing query: """ - GO FROM hash("Shaquile O\'Neal") OVER serve, like YIELD serve._dst, like._dst + GO FROM hash("Shaquille O\'Neal") OVER serve, like YIELD serve._dst, like._dst """ Then the result should be, in any order, with relax comparison, and the columns 0,1 should be hashed: | serve._dst | like._dst | @@ -726,7 +726,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille 
O'Neal" | When executing query: """ GO FROM hash('Tim Duncan') OVER like REVERSELY YIELD $$.player.name @@ -742,7 +742,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM hash('Tim Duncan') OVER like REVERSELY WHERE $$.player.age < 35 YIELD $$.player.name @@ -770,7 +770,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | EMPTY | | EMPTY | @@ -859,15 +859,15 @@ Feature: IntegerVid Go Sentence | "Cavaliers" | "Kyrie Irving" | | "Cavaliers" | "LeBron James" | | "Cavaliers" | "LeBron James" | - | "Cavaliers" | "Shaquile O'Neal" | - | "Cavaliers" | "Shaquile O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | | "Cavaliers" | "LeBron James" | | "Cavaliers" | "LeBron James" | | "Heat" | "Amar'e Stoudemire" | | "Heat" | "Dwyane Wade" | | "Heat" | "LeBron James" | | "Heat" | "Ray Allen" | - | "Heat" | "Shaquile O'Neal" | + | "Heat" | "Shaquille O'Neal" | | "Heat" | "Dwyane Wade" | | "Lakers" | "Dwight Howard" | | "Lakers" | "JaVale McGee" | @@ -875,7 +875,7 @@ Feature: IntegerVid Go Sentence | "Lakers" | "LeBron James" | | "Lakers" | "Paul Gasol" | | "Lakers" | "Rajon Rondo" | - | "Lakers" | "Shaquile O'Neal" | + | "Lakers" | "Shaquille O'Neal" | | "Lakers" | "Steve Nash" | When executing query: """ @@ -891,19 +891,19 @@ Feature: IntegerVid Go Sentence | "Cavaliers" | "Dwyane Wade" | | "Cavaliers" | "Kyrie Irving" | | "Cavaliers" | "Kyrie Irving" | - | "Cavaliers" | "Shaquile O'Neal" | - | "Cavaliers" | "Shaquile O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | | "Heat" | "Amar'e Stoudemire" | | "Heat" | "Dwyane Wade" | | "Heat" | "Ray Allen" | - | "Heat" | "Shaquile O'Neal" | + | "Heat" | "Shaquille O'Neal" | | "Heat" | "Dwyane Wade" | | "Lakers" | "Dwight Howard" | | "Lakers" | 
"JaVale McGee" | | "Lakers" | "Kobe Bryant" | | "Lakers" | "Paul Gasol" | | "Lakers" | "Rajon Rondo" | - | "Lakers" | "Shaquile O'Neal" | + | "Lakers" | "Shaquille O'Neal" | | "Lakers" | "Steve Nash" | When executing query: """ @@ -945,7 +945,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM hash('Tim Duncan') OVER serve, like bidirect YIELD serve._dst, like._dst @@ -964,7 +964,7 @@ Feature: IntegerVid Go Sentence | EMPTY | "Boris Diaw" | | EMPTY | "Tiago Splitter" | | EMPTY | "Dejounte Murray" | - | EMPTY | "Shaquile O'Neal" | + | EMPTY | "Shaquille O'Neal" | When executing query: """ GO FROM hash('Tim Duncan') OVER serve bidirect YIELD $$.team.name @@ -989,7 +989,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM hash('Tim Duncan') OVER like bidirect WHERE like.likeness > 90 @@ -1021,7 +1021,7 @@ Feature: IntegerVid Go Sentence | "Tim Duncan" | EMPTY | EMPTY | "Boris Diaw" | "Boris Diaw" | | "Tim Duncan" | EMPTY | EMPTY | "Tiago Splitter" | "Tiago Splitter" | | "Tim Duncan" | EMPTY | EMPTY | "Dejounte Murray" | "Dejounte Murray" | - | "Tim Duncan" | EMPTY | EMPTY | "Shaquile O'Neal" | "Shaquile O'Neal" | + | "Tim Duncan" | EMPTY | EMPTY | "Shaquille O'Neal" | "Shaquille O'Neal" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Tony Parker" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Manu Ginobili" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Danny Green" | @@ -1046,7 +1046,7 @@ Feature: IntegerVid Go Sentence | "Boris Diaw" | EMPTY | EMPTY | | "Tiago Splitter" | EMPTY | EMPTY | | "Dejounte Murray" | EMPTY | EMPTY | - | "Shaquile O'Neal" | EMPTY | EMPTY | + | "Shaquille O'Neal" | EMPTY | EMPTY | | EMPTY | EMPTY | "Tony Parker" | | EMPTY | EMPTY | "Manu Ginobili" | | EMPTY | EMPTY | "LaMarcus Aldridge" | @@ -1208,7 
+1208,7 @@ Feature: IntegerVid Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1227,7 +1227,7 @@ Feature: IntegerVid Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1245,7 +1245,7 @@ Feature: IntegerVid Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1312,7 +1312,7 @@ Feature: IntegerVid Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | | "LeBron James" | @@ -1337,7 +1337,7 @@ Feature: IntegerVid Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | | "LeBron James" | diff --git a/tests/tck/features/go/GO.feature b/tests/tck/features/go/GO.feature index de2219d4965..7878a3f9017 100644 --- a/tests/tck/features/go/GO.feature +++ b/tests/tck/features/go/GO.feature @@ -425,7 +425,7 @@ Feature: Go Sentence | EMPTY | 90 | When executing query: """ - GO FROM "Shaquile O\'Neal" OVER serve, like YIELD serve._dst, like._dst + GO FROM "Shaquille O\'Neal" OVER serve, like YIELD serve._dst, like._dst """ Then the result should be, in any order, with relax comparison: | serve._dst | like._dst | @@ -775,7 +775,7 @@ Feature: Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM 'Tim Duncan' OVER like REVERSELY YIELD $$.player.name @@ -791,7 +791,7 @@ Feature: Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing 
query: """ GO FROM 'Tim Duncan' OVER like REVERSELY WHERE $$.player.age < 35 YIELD $$.player.name @@ -819,7 +819,7 @@ Feature: Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | EMPTY | | EMPTY | @@ -908,15 +908,15 @@ Feature: Go Sentence | "Cavaliers" | "Kyrie Irving" | | "Cavaliers" | "LeBron James" | | "Cavaliers" | "LeBron James" | - | "Cavaliers" | "Shaquile O'Neal" | - | "Cavaliers" | "Shaquile O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | | "Cavaliers" | "LeBron James" | | "Cavaliers" | "LeBron James" | | "Heat" | "Amar'e Stoudemire" | | "Heat" | "Dwyane Wade" | | "Heat" | "LeBron James" | | "Heat" | "Ray Allen" | - | "Heat" | "Shaquile O'Neal" | + | "Heat" | "Shaquille O'Neal" | | "Heat" | "Dwyane Wade" | | "Lakers" | "Dwight Howard" | | "Lakers" | "JaVale McGee" | @@ -924,7 +924,7 @@ Feature: Go Sentence | "Lakers" | "LeBron James" | | "Lakers" | "Paul Gasol" | | "Lakers" | "Rajon Rondo" | - | "Lakers" | "Shaquile O'Neal" | + | "Lakers" | "Shaquille O'Neal" | | "Lakers" | "Steve Nash" | When executing query: """ @@ -940,19 +940,19 @@ Feature: Go Sentence | "Cavaliers" | "Dwyane Wade" | | "Cavaliers" | "Kyrie Irving" | | "Cavaliers" | "Kyrie Irving" | - | "Cavaliers" | "Shaquile O'Neal" | - | "Cavaliers" | "Shaquile O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | | "Heat" | "Amar'e Stoudemire" | | "Heat" | "Dwyane Wade" | | "Heat" | "Ray Allen" | - | "Heat" | "Shaquile O'Neal" | + | "Heat" | "Shaquille O'Neal" | | "Heat" | "Dwyane Wade" | | "Lakers" | "Dwight Howard" | | "Lakers" | "JaVale McGee" | | "Lakers" | "Kobe Bryant" | | "Lakers" | "Paul Gasol" | | "Lakers" | "Rajon Rondo" | - | "Lakers" | "Shaquile O'Neal" | + | "Lakers" | "Shaquille O'Neal" | | "Lakers" | "Steve Nash" | When executing query: """ @@ -994,7 +994,7 @@ Feature: Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - 
| "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM 'Tim Duncan' OVER serve, like bidirect YIELD serve._dst, like._dst @@ -1013,7 +1013,7 @@ Feature: Go Sentence | EMPTY | "Boris Diaw" | | EMPTY | "Tiago Splitter" | | EMPTY | "Dejounte Murray" | - | EMPTY | "Shaquile O'Neal" | + | EMPTY | "Shaquille O'Neal" | When executing query: """ GO FROM 'Tim Duncan' OVER serve bidirect YIELD $$.team.name @@ -1038,7 +1038,7 @@ Feature: Go Sentence | "Boris Diaw" | | "Tiago Splitter" | | "Dejounte Murray" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | When executing query: """ GO FROM 'Tim Duncan' OVER like bidirect WHERE like.likeness > 90 @@ -1070,7 +1070,7 @@ Feature: Go Sentence | "Tim Duncan" | EMPTY | EMPTY | "Boris Diaw" | "Boris Diaw" | | "Tim Duncan" | EMPTY | EMPTY | "Tiago Splitter" | "Tiago Splitter" | | "Tim Duncan" | EMPTY | EMPTY | "Dejounte Murray" | "Dejounte Murray" | - | "Tim Duncan" | EMPTY | EMPTY | "Shaquile O'Neal" | "Shaquile O'Neal" | + | "Tim Duncan" | EMPTY | EMPTY | "Shaquille O'Neal" | "Shaquille O'Neal" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Tony Parker" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Manu Ginobili" | | "Tim Duncan" | EMPTY | EMPTY | EMPTY | "Danny Green" | @@ -1095,7 +1095,7 @@ Feature: Go Sentence | "Boris Diaw" | EMPTY | EMPTY | | "Tiago Splitter" | EMPTY | EMPTY | | "Dejounte Murray" | EMPTY | EMPTY | - | "Shaquile O'Neal" | EMPTY | EMPTY | + | "Shaquille O'Neal" | EMPTY | EMPTY | | EMPTY | EMPTY | "Tony Parker" | | EMPTY | EMPTY | "Manu Ginobili" | | EMPTY | EMPTY | "LaMarcus Aldridge" | @@ -1264,7 +1264,7 @@ Feature: Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1283,7 +1283,7 @@ Feature: Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: 
@@ -1301,7 +1301,7 @@ Feature: Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1368,7 +1368,7 @@ Feature: Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | | "LeBron James" | @@ -1393,7 +1393,7 @@ Feature: Go Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | | "LeBron James" | diff --git a/tests/tck/features/go/GoYieldVertexEdge.feature b/tests/tck/features/go/GoYieldVertexEdge.feature index 6bcdadf491a..81b0e8c1129 100644 --- a/tests/tck/features/go/GoYieldVertexEdge.feature +++ b/tests/tck/features/go/GoYieldVertexEdge.feature @@ -426,7 +426,7 @@ Feature: Go Yield Vertex And Edge Sentence | {likeness: 90} | "like" | When executing query: """ - GO FROM "Shaquile O\'Neal" OVER serve, like YIELD dst(edge) as dst + GO FROM "Shaquille O\'Neal" OVER serve, like YIELD dst(edge) as dst """ Then the result should be, in any order, with relax comparison: | dst | @@ -706,7 +706,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | [:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}] | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}] | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | [:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}] | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | [:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}] | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | [:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}] | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | [:like "Tiago Splitter"->"Tim 
Duncan" @0 {likeness: 80}] | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | When executing query: @@ -722,7 +722,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | [:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}] | "LaMarcus Aldridge" | "Tim Duncan" | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}] | "Manu Ginobili" | "Tim Duncan" | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | [:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}] | "Marco Belinelli" | "Tim Duncan" | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | [:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}] | "Shaquile O'Neal" | "Tim Duncan" | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | [:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}] | "Shaquille O'Neal" | "Tim Duncan" | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | [:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}] | "Tiago Splitter" | "Tim Duncan" | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | "Tony Parker" | "Tim Duncan" | When executing query: @@ -887,7 +887,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("Heat" :team{name: "Heat"}) | ("Dwyane Wade" :player{age: 37, name: "Dwyane Wade"}) | | ("Heat" :team{name: "Heat"}) | ("LeBron James" :player{age: 34, name: "LeBron James"}) | | ("Heat" :team{name: "Heat"}) | ("Ray Allen" :player{age: 43, name: "Ray Allen"}) | - | ("Heat" :team{name: "Heat"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Heat" :team{name: "Heat"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Heat" :team{name: "Heat"}) | ("Dwyane Wade" :player{age: 37, name: "Dwyane Wade"}) | | ("Lakers" 
:team{name: "Lakers"}) | ("Dwight Howard" :player{age: 33, name: "Dwight Howard"}) | | ("Lakers" :team{name: "Lakers"}) | ("JaVale McGee" :player{age: 31, name: "JaVale McGee"}) | @@ -895,7 +895,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("Lakers" :team{name: "Lakers"}) | ("LeBron James" :player{age: 34, name: "LeBron James"}) | | ("Lakers" :team{name: "Lakers"}) | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | | ("Lakers" :team{name: "Lakers"}) | ("Rajon Rondo" :player{age: 33, name: "Rajon Rondo"}) | - | ("Lakers" :team{name: "Lakers"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Lakers" :team{name: "Lakers"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Lakers" :team{name: "Lakers"}) | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("Danny Green" :player{age: 31, name: "Danny Green"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("Danny Green" :player{age: 31, name: "Danny Green"}) | @@ -905,8 +905,8 @@ Feature: Go Yield Vertex And Edge Sentence | ("Cavaliers" :team{name: "Cavaliers"}) | ("Kyrie Irving" :player{age: 26, name: "Kyrie Irving"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("LeBron James" :player{age: 34, name: "LeBron James"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("LeBron James" :player{age: 34, name: "LeBron James"}) | - | ("Cavaliers" :team{name: "Cavaliers"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | - | ("Cavaliers" :team{name: "Cavaliers"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Cavaliers" :team{name: "Cavaliers"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | + | ("Cavaliers" :team{name: "Cavaliers"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("LeBron James" :player{age: 34, name: "LeBron James"}) | | ("Cavaliers" :team{name: "Cavaliers"}) | ("LeBron James" :player{age: 
34, name: "LeBron James"}) | When executing query: @@ -916,23 +916,23 @@ Feature: Go Yield Vertex And Edge Sentence YIELD distinct edge as e """ Then the result should be, in any order, with relax comparison: - | e | - | [:serve "Amar'e Stoudemire"->"Heat" @0 {end_year: 2016, start_year: 2015}] | - | [:serve "Dwyane Wade"->"Heat" @0 {end_year: 2016, start_year: 2003}] | - | [:serve "Shaquile O'Neal"->"Cavaliers" @0 {end_year: 2010, start_year: 2009}] | - | [:serve "Ray Allen"->"Heat" @0 {end_year: 2014, start_year: 2012}] | - | [:serve "Shaquile O'Neal"->"Heat" @0 {end_year: 2008, start_year: 2004}] | - | [:serve "Dwyane Wade"->"Heat" @1 {end_year: 2019, start_year: 2018}] | - | [:serve "Dwight Howard"->"Lakers" @0 {end_year: 2013, start_year: 2012}] | - | [:serve "JaVale McGee"->"Lakers" @0 {end_year: 2019, start_year: 2018}] | - | [:serve "Kobe Bryant"->"Lakers" @0 {end_year: 2016, start_year: 1996}] | - | [:serve "Kyrie Irving"->"Cavaliers" @0 {end_year: 2017, start_year: 2011}] | - | [:serve "Paul Gasol"->"Lakers" @0 {end_year: 2014, start_year: 2008}] | - | [:serve "Rajon Rondo"->"Lakers" @0 {end_year: 2019, start_year: 2018}] | - | [:serve "Shaquile O'Neal"->"Lakers" @0 {end_year: 2004, start_year: 1996}] | - | [:serve "Steve Nash"->"Lakers" @0 {end_year: 2015, start_year: 2012}] | - | [:serve "Danny Green"->"Cavaliers" @0 {end_year: 2010, start_year: 2009}] | - | [:serve "Dwyane Wade"->"Cavaliers" @0 {end_year: 2018, start_year: 2017}] | + | e | + | [:serve "Amar'e Stoudemire"->"Heat" @0 {end_year: 2016, start_year: 2015}] | + | [:serve "Dwyane Wade"->"Heat" @0 {end_year: 2016, start_year: 2003}] | + | [:serve "Shaquille O'Neal"->"Cavaliers" @0 {end_year: 2010, start_year: 2009}] | + | [:serve "Ray Allen"->"Heat" @0 {end_year: 2014, start_year: 2012}] | + | [:serve "Shaquille O'Neal"->"Heat" @0 {end_year: 2008, start_year: 2004}] | + | [:serve "Dwyane Wade"->"Heat" @1 {end_year: 2019, start_year: 2018}] | + | [:serve "Dwight Howard"->"Lakers" @0 {end_year: 
2013, start_year: 2012}] | + | [:serve "JaVale McGee"->"Lakers" @0 {end_year: 2019, start_year: 2018}] | + | [:serve "Kobe Bryant"->"Lakers" @0 {end_year: 2016, start_year: 1996}] | + | [:serve "Kyrie Irving"->"Cavaliers" @0 {end_year: 2017, start_year: 2011}] | + | [:serve "Paul Gasol"->"Lakers" @0 {end_year: 2014, start_year: 2008}] | + | [:serve "Rajon Rondo"->"Lakers" @0 {end_year: 2019, start_year: 2018}] | + | [:serve "Shaquille O'Neal"->"Lakers" @0 {end_year: 2004, start_year: 1996}] | + | [:serve "Steve Nash"->"Lakers" @0 {end_year: 2015, start_year: 2012}] | + | [:serve "Danny Green"->"Cavaliers" @0 {end_year: 2010, start_year: 2009}] | + | [:serve "Dwyane Wade"->"Cavaliers" @0 {end_year: 2018, start_year: 2017}] | When executing query: """ GO FROM 'Manu Ginobili' OVER like REVERSELY YIELD src(edge) AS id | @@ -966,7 +966,7 @@ Feature: Go Yield Vertex And Edge Sentence | [:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}] | | [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}] | | [:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}] | - | [:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}] | + | [:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | | [:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}] | @@ -994,7 +994,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | | ("Tony Parker" :player{age: 36, name: "Tony Parker"}) | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | @@ -1024,7 +1024,7 @@ Feature: 
Go Yield Vertex And Edge Sentence | [:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}] | | [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}] | | [:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}] | - | [:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}] | + | [:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | | [:teammate "Manu Ginobili"->"Tim Duncan" @0 {end_year: 2016, start_year: 2002}] | @@ -1212,7 +1212,7 @@ Feature: Go Yield Vertex And Edge Sentence | "Danny Green" | | "Aron Baynes" | | "Tiago Splitter" | - | "Shaquile O'Neal" | + | "Shaquille O'Neal" | | "Rudy Gay" | | "Damian Lillard" | When executing query: @@ -1236,7 +1236,7 @@ Feature: Go Yield Vertex And Edge Sentence | [:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}] | | [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}] | | [:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}] | - | [:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}] | + | [:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}] | | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | | [:like "Danny Green"->"Marco Belinelli" @0 {likeness: 83}] | @@ -1257,7 +1257,7 @@ Feature: Go Yield Vertex And Edge Sentence | ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}) | | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | When executing query: """ @@ -1333,7 +1333,7 @@ Feature: Go Yield Vertex And Edge Sentence | "LaMarcus Aldridge" | "Tim Duncan" | | "Manu Ginobili" | "Tim Duncan" | | "Dejounte Murray" | 
"LeBron James" | - | "Shaquile O'Neal" | "Tim Duncan" | + | "Shaquille O'Neal" | "Tim Duncan" | | "Tiago Splitter" | "Tim Duncan" | | "Dejounte Murray" | "Kyle Anderson" | | "Tim Duncan" | "Manu Ginobili" | diff --git a/tests/tck/features/insert/Insert.IntVid.feature b/tests/tck/features/insert/Insert.IntVid.feature index 520e24ea01e..1ee3326fa2e 100644 --- a/tests/tck/features/insert/Insert.IntVid.feature +++ b/tests/tck/features/insert/Insert.IntVid.feature @@ -30,7 +30,7 @@ Feature: Insert int vid of vertex and edge Scenario: insert vertex and edge test Given wait 3 seconds - # insert vretex with default property names + # insert vertex with default property names When try to execute query: """ INSERT VERTEX person VALUES hash("Tom"):("Tom", 18); diff --git a/tests/tck/features/insert/Insert.feature b/tests/tck/features/insert/Insert.feature index 38157303e0f..884de390357 100644 --- a/tests/tck/features/insert/Insert.feature +++ b/tests/tck/features/insert/Insert.feature @@ -33,7 +33,7 @@ Feature: Insert string vid of vertex and edge INSERT VERTEX person(name, age) VALUES "Tom":("Tom", 22) """ Then the execution should be successful - # insert vretex with default property names + # insert vertex with default property names When executing query: """ INSERT VERTEX person VALUES "Tom":("Tom", 18); diff --git a/tests/tck/features/lookup/ByIndex.feature b/tests/tck/features/lookup/ByIndex.feature index 357f66b5894..d6df792509c 100644 --- a/tests/tck/features/lookup/ByIndex.feature +++ b/tests/tck/features/lookup/ByIndex.feature @@ -244,12 +244,12 @@ Feature: Lookup by index itself | 'Grant Hill' | 'Magic' | 0 | | 'Grant Hill' | 'Suns' | 0 | | 'Grant Hill' | 'Clippers' | 0 | - | "Shaquile O'Neal" | 'Magic' | 0 | - | "Shaquile O'Neal" | 'Lakers' | 0 | - | "Shaquile O'Neal" | 'Heat' | 0 | - | "Shaquile O'Neal" | 'Suns' | 0 | - | "Shaquile O'Neal" | 'Cavaliers' | 0 | - | "Shaquile O'Neal" | 'Celtics' | 0 | + | "Shaquille O'Neal" | 'Magic' | 0 | + | "Shaquille O'Neal" | 
'Lakers' | 0 | + | "Shaquille O'Neal" | 'Heat' | 0 | + | "Shaquille O'Neal" | 'Suns' | 0 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | + | "Shaquille O'Neal" | 'Celtics' | 0 | | 'JaVale McGee' | 'Wizards' | 0 | | 'JaVale McGee' | 'Nuggets' | 0 | | 'JaVale McGee' | 'Mavericks' | 0 | @@ -402,12 +402,12 @@ Feature: Lookup by index itself | 'Grant Hill' | 'Magic' | 0 | 2000 | | 'Grant Hill' | 'Suns' | 0 | 2007 | | 'Grant Hill' | 'Clippers' | 0 | 2012 | - | "Shaquile O'Neal" | 'Magic' | 0 | 1992 | - | "Shaquile O'Neal" | 'Lakers' | 0 | 1996 | - | "Shaquile O'Neal" | 'Heat' | 0 | 2004 | - | "Shaquile O'Neal" | 'Suns' | 0 | 2008 | - | "Shaquile O'Neal" | 'Cavaliers' | 0 | 2009 | - | "Shaquile O'Neal" | 'Celtics' | 0 | 2010 | + | "Shaquille O'Neal" | 'Magic' | 0 | 1992 | + | "Shaquille O'Neal" | 'Lakers' | 0 | 1996 | + | "Shaquille O'Neal" | 'Heat' | 0 | 2004 | + | "Shaquille O'Neal" | 'Suns' | 0 | 2008 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | 2009 | + | "Shaquille O'Neal" | 'Celtics' | 0 | 2010 | | 'JaVale McGee' | 'Wizards' | 0 | 2008 | | 'JaVale McGee' | 'Nuggets' | 0 | 2012 | | 'JaVale McGee' | 'Mavericks' | 0 | 2015 | @@ -458,59 +458,59 @@ Feature: Lookup by index itself LOOKUP ON player WHERE player.age > 40 YIELD player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age >= 40.0 YIELD player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | 
- | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | - | "Dirk Nowitzki" | 40 | - | "Kobe Bryant" | 40 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | + | "Dirk Nowitzki" | 40 | + | "Kobe Bryant" | 40 | When executing query: """ LOOKUP ON player WHERE player.age > 40.5 YIELD player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age >= 40.5 YIELD player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age < 40 diff --git a/tests/tck/features/lookup/ByIndex.intVid.feature b/tests/tck/features/lookup/ByIndex.intVid.feature index 1e56aa4b289..7f6c3466b63 100644 --- a/tests/tck/features/lookup/ByIndex.intVid.feature +++ b/tests/tck/features/lookup/ByIndex.intVid.feature @@ -244,12 +244,12 @@ Feature: Lookup by index itself in integer vid | 'Grant 
Hill' | 'Magic' | 0 | | 'Grant Hill' | 'Suns' | 0 | | 'Grant Hill' | 'Clippers' | 0 | - | "Shaquile O'Neal" | 'Magic' | 0 | - | "Shaquile O'Neal" | 'Lakers' | 0 | - | "Shaquile O'Neal" | 'Heat' | 0 | - | "Shaquile O'Neal" | 'Suns' | 0 | - | "Shaquile O'Neal" | 'Cavaliers' | 0 | - | "Shaquile O'Neal" | 'Celtics' | 0 | + | "Shaquille O'Neal" | 'Magic' | 0 | + | "Shaquille O'Neal" | 'Lakers' | 0 | + | "Shaquille O'Neal" | 'Heat' | 0 | + | "Shaquille O'Neal" | 'Suns' | 0 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | + | "Shaquille O'Neal" | 'Celtics' | 0 | | 'JaVale McGee' | 'Wizards' | 0 | | 'JaVale McGee' | 'Nuggets' | 0 | | 'JaVale McGee' | 'Mavericks' | 0 | @@ -402,12 +402,12 @@ Feature: Lookup by index itself in integer vid | 'Grant Hill' | 'Magic' | 0 | 2000 | | 'Grant Hill' | 'Suns' | 0 | 2007 | | 'Grant Hill' | 'Clippers' | 0 | 2012 | - | "Shaquile O'Neal" | 'Magic' | 0 | 1992 | - | "Shaquile O'Neal" | 'Lakers' | 0 | 1996 | - | "Shaquile O'Neal" | 'Heat' | 0 | 2004 | - | "Shaquile O'Neal" | 'Suns' | 0 | 2008 | - | "Shaquile O'Neal" | 'Cavaliers' | 0 | 2009 | - | "Shaquile O'Neal" | 'Celtics' | 0 | 2010 | + | "Shaquille O'Neal" | 'Magic' | 0 | 1992 | + | "Shaquille O'Neal" | 'Lakers' | 0 | 1996 | + | "Shaquille O'Neal" | 'Heat' | 0 | 2004 | + | "Shaquille O'Neal" | 'Suns' | 0 | 2008 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | 2009 | + | "Shaquille O'Neal" | 'Celtics' | 0 | 2010 | | 'JaVale McGee' | 'Wizards' | 0 | 2008 | | 'JaVale McGee' | 'Nuggets' | 0 | 2012 | | 'JaVale McGee' | 'Mavericks' | 0 | 2015 | @@ -458,59 +458,59 @@ Feature: Lookup by index itself in integer vid LOOKUP ON player WHERE player.age > 40 YIELD player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | 
"Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age >= 40.0 YIELD player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | - | "Dirk Nowitzki" | 40 | - | "Kobe Bryant" | 40 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | + | "Dirk Nowitzki" | 40 | + | "Kobe Bryant" | 40 | When executing query: """ LOOKUP ON player WHERE player.age > 40.5 YIELD player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 | + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age >= 40.5 YIELD player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | - | "Grant Hill" | 46 | - | "Jason Kidd" | 45 | - | "Manu Ginobili" | 41 | - | "Ray Allen" | 43 | - | "Shaquile O'Neal" | 47 | - | "Steve Nash" | 45 | - | "Tim Duncan" | 42 | - | "Vince Carter" | 42 | + | VertexID | Age | + | "Grant Hill" | 46 | + | "Jason Kidd" | 45 
| + | "Manu Ginobili" | 41 | + | "Ray Allen" | 43 | + | "Shaquille O'Neal" | 47 | + | "Steve Nash" | 45 | + | "Tim Duncan" | 42 | + | "Vince Carter" | 42 | When executing query: """ LOOKUP ON player WHERE player.age < 40 diff --git a/tests/tck/features/lookup/Output.feature b/tests/tck/features/lookup/Output.feature index f0ffe610a0b..27e64f0a7c0 100644 --- a/tests/tck/features/lookup/Output.feature +++ b/tests/tck/features/lookup/Output.feature @@ -14,7 +14,7 @@ Feature: Lookup with output | 'Kobe Bryant' | | 'Dirk Nowitzki' | - Scenario: [1] tag ouput with yield rename + Scenario: [1] tag output with yield rename When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name | @@ -36,7 +36,7 @@ Feature: Lookup with output | 'Kobe Bryant' | | 'Dirk Nowitzki' | - Scenario: [1] tag ouput with yield rename by var + Scenario: [1] tag output with yield rename by var When executing query: """ $a = LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name; diff --git a/tests/tck/features/lookup/Output.intVid.feature b/tests/tck/features/lookup/Output.intVid.feature index b22430945ef..d177623e6df 100644 --- a/tests/tck/features/lookup/Output.intVid.feature +++ b/tests/tck/features/lookup/Output.intVid.feature @@ -14,7 +14,7 @@ Feature: Lookup with output in integer vid | 'Kobe Bryant' | | 'Dirk Nowitzki' | - Scenario: [1] tag ouput with yield rename + Scenario: [1] tag output with yield rename When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name | @@ -36,7 +36,7 @@ Feature: Lookup with output in integer vid | 'Kobe Bryant' | | 'Dirk Nowitzki' | - Scenario: [1] tag ouput with yield rename by var + Scenario: [1] tag output with yield rename by var When executing query: """ $a = LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name; diff --git a/tests/tck/features/lookup/TagIndexFullScan.feature b/tests/tck/features/lookup/TagIndexFullScan.feature index 85e189d9b0c..09169ed8d8b 100644 --- 
a/tests/tck/features/lookup/TagIndexFullScan.feature +++ b/tests/tck/features/lookup/TagIndexFullScan.feature @@ -336,7 +336,7 @@ Feature: Lookup tag index full scan | "Ricky Rubio" | 28 | | "Rudy Gay" | 32 | | "Russell Westbrook" | 30 | - | "Shaquile O'Neal" | 47 | + | "Shaquille O'Neal" | 47 | | "Stephen Curry" | 31 | | "Steve Nash" | 45 | | "Tiago Splitter" | 34 | diff --git a/tests/tck/features/match/Base.IntVid.feature b/tests/tck/features/match/Base.IntVid.feature index 20d15d7eae4..0b95df4156d 100644 --- a/tests/tck/features/match/Base.IntVid.feature +++ b/tests/tck/features/match/Base.IntVid.feature @@ -190,7 +190,7 @@ Feature: Basic match | "Tony Parker" | "Spurs" | "David West" | | "Tony Parker" | "Spurs" | "Dejounte Murray" | - Scenario: Uistinct + Scenario: Distinct When executing query: """ MATCH (:player{name:'Dwyane Wade'}) -[:like]-> () -[:like]-> (v3) diff --git a/tests/tck/features/match/Base.feature b/tests/tck/features/match/Base.feature index 205831567ca..26227d58e9b 100644 --- a/tests/tck/features/match/Base.feature +++ b/tests/tck/features/match/Base.feature @@ -204,12 +204,12 @@ Feature: Basic match MATCH (v:player)-[e:like]-(v2) where v.age == 38 RETURN * """ Then the result should be, in any order, with relax comparison: - | v | e | v2 | - | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Marc Gasol"->"Paul Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | - | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Kobe Bryant" @0 {likeness: 90}] | ("Kobe Bryant" :player{age: 40, name: "Kobe Bryant"}) | - | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Marc Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | - | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | [:like "Yao Ming"->"Shaquile O'Neal" @0 {likeness: 90}] | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | - | ("Yao Ming" :player{age: 38, 
name: "Yao Ming"}) | [:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}] | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | + | v | e | v2 | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Marc Gasol"->"Paul Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Kobe Bryant" @0 {likeness: 90}] | ("Kobe Bryant" :player{age: 40, name: "Kobe Bryant"}) | + | ("Paul Gasol" :player{age: 38, name: "Paul Gasol"}) | [:like "Paul Gasol"->"Marc Gasol" @0 {likeness: 99}] | ("Marc Gasol" :player{age: 34, name: "Marc Gasol"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | [:like "Yao Ming"->"Shaquille O'Neal" @0 {likeness: 90}] | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | [:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}] | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | When executing query: """ MATCH (v:player)-[e:like]->(v2) where id(v) == "Tim Duncan" RETURN DISTINCT properties(e) as props, e @@ -254,7 +254,7 @@ Feature: Basic match | "Tony Parker" | "Spurs" | "David West" | | "Tony Parker" | "Spurs" | "Dejounte Murray" | - Scenario: Uistinct + Scenario: Distinct When executing query: """ MATCH (:player{name:'Dwyane Wade'}) -[:like]-> () -[:like]-> (v3) diff --git a/tests/tck/features/match/MatchById.IntVid.feature b/tests/tck/features/match/MatchById.IntVid.feature index 56a0a5b6b7f..6db58ed487b 100644 --- a/tests/tck/features/match/MatchById.IntVid.feature +++ b/tests/tck/features/match/MatchById.IntVid.feature @@ -601,7 +601,7 @@ Feature: Integer Vid Match By Id | [[:like "Dejounte Murray"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Marco Belinelli"@0],[:like "Marco Belinelli"->"Tony Parker"@0]] | ("Tony Parker") | | [[:like "Danny Green"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Danny Green"@0],[:like "Marco Belinelli"->"Tony 
Parker"@0]] | ("Tony Parker") | | [[:like "Danny Green"->"Tim Duncan"@0],[:like "Danny Green"->"Marco Belinelli"@0],[:like "Marco Belinelli"->"Tony Parker"@0]] | ("Tony Parker") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquile O'Neal"@0],[:like "Yao Ming"->"Tracy McGrady"@0]] | ("Tracy McGrady") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquille O'Neal"@0],[:like "Yao Ming"->"Tracy McGrady"@0]] | ("Tracy McGrady") | | [[:like "LaMarcus Aldridge"->"Tim Duncan"@0],[:like "Tony Parker"->"LaMarcus Aldridge"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "LaMarcus Aldridge"->"Tim Duncan"@0],[:like "LaMarcus Aldridge"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Dejounte Murray"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | @@ -774,10 +774,10 @@ Feature: Integer Vid Match By Id | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco Belinelli") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco Belinelli") | | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco Belinelli") | - | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | 
("Shaquile O'Neal") | + | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | @@ -822,8 +822,8 @@ Feature: Integer Vid Match By Id | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Tony Parker"->"Manu Ginobili"@0]] | ("Tony Parker") | | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0]] | ("Tim Duncan") | | [[:like "Tiago Splitter"->"Tim Duncan"@0],[:like "Tiago Splitter"->"Manu Ginobili"@0]] | ("Manu Ginobili") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquile O'Neal"@0]] | ("Yao Ming") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"JaVale McGee"@0]] | ("JaVale McGee") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquille O'Neal"@0]] | ("Yao Ming") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"JaVale McGee"@0]] | ("JaVale McGee") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Tim 
Duncan"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Tony Parker"@0]] | ("Dejounte Murray") | diff --git a/tests/tck/features/match/MatchById.feature b/tests/tck/features/match/MatchById.feature index bded9c83ad8..d54e15c247c 100644 --- a/tests/tck/features/match/MatchById.feature +++ b/tests/tck/features/match/MatchById.feature @@ -601,7 +601,7 @@ Feature: Match By Id | [[:like "Dejounte Murray"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Marco Belinelli"@0],[:like "Marco Belinelli"->"Tony Parker"@0]] | ("Tony Parker") | | [[:like "Danny Green"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Danny Green"@0],[:like "Marco Belinelli"->"Tony Parker"@0]] | ("Tony Parker") | | [[:like "Danny Green"->"Tim Duncan"@0],[:like "Danny Green"->"Marco Belinelli"@0],[:like "Marco Belinelli"->"Tony Parker"@0]] | ("Tony Parker") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquile O'Neal"@0],[:like "Yao Ming"->"Tracy McGrady"@0]] | ("Tracy McGrady") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquille O'Neal"@0],[:like "Yao Ming"->"Tracy McGrady"@0]] | ("Tracy McGrady") | | [[:like "LaMarcus Aldridge"->"Tim Duncan"@0],[:like "Tony Parker"->"LaMarcus Aldridge"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "LaMarcus Aldridge"->"Tim Duncan"@0],[:like "LaMarcus Aldridge"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Dejounte Murray"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | @@ -774,10 +774,10 @@ Feature: Match By Id | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco Belinelli") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco 
Belinelli") | | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Marco Belinelli"->"Tim Duncan"@0]] | ("Marco Belinelli") | - | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | - | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"Tim Duncan"@0]] | ("Shaquile O'Neal") | + | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | + | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tony Parker"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"Tim Duncan"@0]] | ("Shaquille O'Neal") | | [[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Tim Duncan"->"Tony Parker"@0],[:like "Tiago Splitter"->"Tim Duncan"@0]] | ("Tiago Splitter") | @@ -822,8 +822,8 @@ Feature: Match By Id | [[:like "Tim Duncan"->"Manu Ginobili"@0],[:like "Tony Parker"->"Manu Ginobili"@0]] | ("Tony Parker") | | [[:like "Tim Duncan"->"Manu 
Ginobili"@0],[:like "Manu Ginobili"->"Tim Duncan"@0]] | ("Tim Duncan") | | [[:like "Tiago Splitter"->"Tim Duncan"@0],[:like "Tiago Splitter"->"Manu Ginobili"@0]] | ("Manu Ginobili") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquile O'Neal"@0]] | ("Yao Ming") | - | [[:like "Shaquile O'Neal"->"Tim Duncan"@0],[:like "Shaquile O'Neal"->"JaVale McGee"@0]] | ("JaVale McGee") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Yao Ming"->"Shaquille O'Neal"@0]] | ("Yao Ming") | + | [[:like "Shaquille O'Neal"->"Tim Duncan"@0],[:like "Shaquille O'Neal"->"JaVale McGee"@0]] | ("JaVale McGee") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Tim Duncan"->"Tony Parker"@0],[:like "Boris Diaw"->"Tony Parker"@0]] | ("Boris Diaw") | | [[:like "Tony Parker"->"Tim Duncan"@0],[:like "Dejounte Murray"->"Tony Parker"@0]] | ("Dejounte Murray") | diff --git a/tests/tck/features/match/MatchGroupBy.feature b/tests/tck/features/match/MatchGroupBy.feature index 7eca9555b48..720b4c7c8fe 100644 --- a/tests/tck/features/match/MatchGroupBy.feature +++ b/tests/tck/features/match/MatchGroupBy.feature @@ -97,13 +97,13 @@ Feature: Match GroupBy SKIP 10 LIMIT 6; """ Then the result should be, in order, with relax comparison: - | id | count | sum | max | min | age | lb | - | "Ray Allen" | 1 | 43.0 | 43 | 43 | 44.0 | ["player"] | - | "Shaquile O'Neal" | 1 | 47.0 | 47 | 47 | 48.0 | ["player"] | - | "Steve Nash" | 1 | 45.0 | 45 | 45 | 46.0 | ["player"] | - | "Tim Duncan" | 1 | 42.0 | 42 | 42 | 43.0 | ["bachelor", "player"] | - | "Tony Parker" | 1 | 36.0 | 36 | 36 | 37.0 | ["player"] | - | "Tracy McGrady" | 1 | 39.0 | 39 | 39 | 40.0 | ["player"] | + | id | count | sum | max | min | age | lb | + | "Ray Allen" | 1 | 43.0 | 43 | 43 | 44.0 | ["player"] | + | "Shaquille O'Neal" | 1 | 47.0 | 47 | 47 | 48.0 | ["player"] | + | "Steve Nash" | 1 | 45.0 | 45 | 45 | 46.0 | ["player"] | + | "Tim Duncan" | 1 | 42.0 | 42 | 42 
| 43.0 | ["bachelor", "player"] | + | "Tony Parker" | 1 | 36.0 | 36 | 36 | 37.0 | ["player"] | + | "Tracy McGrady" | 1 | 39.0 | 39 | 39 | 40.0 | ["player"] | Scenario: [5] Match GroupBy When executing query: @@ -121,16 +121,16 @@ Feature: Match GroupBy SKIP 10 LIMIT 20; """ Then the result should be, in order, with relax comparison: - | id | count | sum | max | min | age | lb | - | "Shaquile O'Neal" | 1 | 47.0 | 31 | 47 | 48.0 | ["player"] | - | "Shaquile O'Neal" | 1 | 47.0 | 42 | 47 | 48.0 | ["bachelor", "player"] | - | "Steve Nash" | 4 | 180.0 | 45 | 45 | 46.0 | ["player"] | - | "Tim Duncan" | 2 | 84.0 | 41 | 42 | 43.0 | ["player"] | - | "Tony Parker" | 1 | 36.0 | 42 | 36 | 37.0 | ["bachelor", "player"] | - | "Tony Parker" | 2 | 72.0 | 41 | 36 | 37.0 | ["player"] | - | "Tracy McGrady" | 3 | 117.0 | 46 | 39 | 40.0 | ["player"] | - | "Vince Carter" | 2 | 84.0 | 45 | 42 | 43.0 | ["player"] | - | "Yao Ming" | 2 | 76.0 | 47 | 38 | 39.0 | ["player"] | + | id | count | sum | max | min | age | lb | + | "Shaquille O'Neal" | 1 | 47.0 | 31 | 47 | 48.0 | ["player"] | + | "Shaquille O'Neal" | 1 | 47.0 | 42 | 47 | 48.0 | ["bachelor", "player"] | + | "Steve Nash" | 4 | 180.0 | 45 | 45 | 46.0 | ["player"] | + | "Tim Duncan" | 2 | 84.0 | 41 | 42 | 43.0 | ["player"] | + | "Tony Parker" | 1 | 36.0 | 42 | 36 | 37.0 | ["bachelor", "player"] | + | "Tony Parker" | 2 | 72.0 | 41 | 36 | 37.0 | ["player"] | + | "Tracy McGrady" | 3 | 117.0 | 46 | 39 | 40.0 | ["player"] | + | "Vince Carter" | 2 | 84.0 | 45 | 42 | 43.0 | ["player"] | + | "Yao Ming" | 2 | 76.0 | 47 | 38 | 39.0 | ["player"] | Scenario: [6] Match GroupBy When executing query: diff --git a/tests/tck/features/match/PipeAndVariable.feature b/tests/tck/features/match/PipeAndVariable.feature index 071ce5f09c5..d175f0da03f 100644 --- a/tests/tck/features/match/PipeAndVariable.feature +++ b/tests/tck/features/match/PipeAndVariable.feature @@ -29,7 +29,7 @@ Feature: Pipe or use variable to store the match results | 42 | false | "Tim 
Duncan" | "LaMarcus Aldridge" | | 42 | false | "Tim Duncan" | "Manu Ginobili" | | 42 | false | "Tim Duncan" | "Marco Belinelli" | - | 42 | false | "Tim Duncan" | "Shaquile O'Neal" | + | 42 | false | "Tim Duncan" | "Shaquille O'Neal" | | 42 | false | "Tim Duncan" | "Tiago Splitter" | | 42 | true | "Tim Duncan" | "Tony Parker" | @@ -56,7 +56,7 @@ Feature: Pipe or use variable to store the match results | 42 | false | "Tim Duncan" | "LaMarcus Aldridge" | | 42 | false | "Tim Duncan" | "Manu Ginobili" | | 42 | false | "Tim Duncan" | "Marco Belinelli" | - | 42 | false | "Tim Duncan" | "Shaquile O'Neal" | + | 42 | false | "Tim Duncan" | "Shaquille O'Neal" | | 42 | false | "Tim Duncan" | "Tiago Splitter" | | 42 | true | "Tim Duncan" | "Tony Parker" | diff --git a/tests/tck/features/match/SeekByEdge.feature b/tests/tck/features/match/SeekByEdge.feature index e949f54899e..ef1629c7652 100644 --- a/tests/tck/features/match/SeekByEdge.feature +++ b/tests/tck/features/match/SeekByEdge.feature @@ -150,12 +150,12 @@ Feature: Match seek by edge | "Grant Hill" | "Magic" | | "Grant Hill" | "Suns" | | "Grant Hill" | "Clippers" | - | "Shaquile O'Neal" | "Magic" | - | "Shaquile O'Neal" | "Lakers" | - | "Shaquile O'Neal" | "Heat" | - | "Shaquile O'Neal" | "Suns" | - | "Shaquile O'Neal" | "Cavaliers" | - | "Shaquile O'Neal" | "Celtics" | + | "Shaquille O'Neal" | "Magic" | + | "Shaquille O'Neal" | "Lakers" | + | "Shaquille O'Neal" | "Heat" | + | "Shaquille O'Neal" | "Suns" | + | "Shaquille O'Neal" | "Cavaliers" | + | "Shaquille O'Neal" | "Celtics" | | "JaVale McGee" | "Wizards" | | "JaVale McGee" | "Nuggets" | | "JaVale McGee" | "Mavericks" | @@ -309,12 +309,12 @@ Feature: Match seek by edge | "Grant Hill" | "Magic" | | "Grant Hill" | "Suns" | | "Grant Hill" | "Clippers" | - | "Shaquile O'Neal" | "Magic" | - | "Shaquile O'Neal" | "Lakers" | - | "Shaquile O'Neal" | "Heat" | - | "Shaquile O'Neal" | "Suns" | - | "Shaquile O'Neal" | "Cavaliers" | - | "Shaquile O'Neal" | "Celtics" | + | 
"Shaquille O'Neal" | "Magic" | + | "Shaquille O'Neal" | "Lakers" | + | "Shaquille O'Neal" | "Heat" | + | "Shaquille O'Neal" | "Suns" | + | "Shaquille O'Neal" | "Cavaliers" | + | "Shaquille O'Neal" | "Celtics" | | "JaVale McGee" | "Wizards" | | "JaVale McGee" | "Nuggets" | | "JaVale McGee" | "Mavericks" | @@ -468,12 +468,12 @@ Feature: Match seek by edge | "Magic" | "Grant Hill" | | "Clippers" | "Grant Hill" | | "Pistons" | "Grant Hill" | - | "Celtics" | "Shaquile O'Neal" | - | "Cavaliers" | "Shaquile O'Neal" | - | "Lakers" | "Shaquile O'Neal" | - | "Magic" | "Shaquile O'Neal" | - | "Suns" | "Shaquile O'Neal" | - | "Heat" | "Shaquile O'Neal" | + | "Celtics" | "Shaquille O'Neal" | + | "Cavaliers" | "Shaquille O'Neal" | + | "Lakers" | "Shaquille O'Neal" | + | "Magic" | "Shaquille O'Neal" | + | "Suns" | "Shaquille O'Neal" | + | "Heat" | "Shaquille O'Neal" | | "Warriors" | "JaVale McGee" | | "Mavericks" | "JaVale McGee" | | "Wizards" | "JaVale McGee" | @@ -519,7 +519,7 @@ Feature: Match seek by edge | "LeBron James" | "Lakers" | | "JaVale McGee" | "Lakers" | | "Dwight Howard" | "Lakers" | - | "Shaquile O'Neal" | "Lakers" | + | "Shaquille O'Neal" | "Lakers" | | "Paul Gasol" | "Lakers" | | "Ricky Rubio" | "Jazz" | | "Boris Diaw" | "Jazz" | @@ -547,7 +547,7 @@ Feature: Match seek by edge | "Dwight Howard" | "Magic" | | "Tracy McGrady" | "Magic" | | "Vince Carter" | "Magic" | - | "Shaquile O'Neal" | "Magic" | + | "Shaquille O'Neal" | "Magic" | | "Carmelo Anthony" | "Rockets" | | "Tracy McGrady" | "Rockets" | | "Dwight Howard" | "Rockets" | @@ -585,7 +585,7 @@ Feature: Match seek by edge | "Ray Allen" | "Heat" | | "Amar'e Stoudemire" | "Heat" | | "Dwyane Wade" | "Heat" | - | "Shaquile O'Neal" | "Heat" | + | "Shaquille O'Neal" | "Heat" | | "Marc Gasol" | "Grizzlies" | | "Kyle Anderson" | "Grizzlies" | | "Vince Carter" | "Grizzlies" | @@ -601,7 +601,7 @@ Feature: Match seek by edge | "Grant Hill" | "Suns" | | "Vince Carter" | "Suns" | | "Amar'e Stoudemire" | "Suns" | - | 
"Shaquile O'Neal" | "Suns" | + | "Shaquille O'Neal" | "Suns" | | "Jason Kidd" | "Suns" | | "Boris Diaw" | "Suns" | | "David West" | "Hornets" | @@ -616,13 +616,13 @@ Feature: Match seek by edge | "Dwyane Wade" | "Cavaliers" | | "Kyrie Irving" | "Cavaliers" | | "Danny Green" | "Cavaliers" | - | "Shaquile O'Neal" | "Cavaliers" | + | "Shaquille O'Neal" | "Cavaliers" | | "Marco Belinelli" | "Kings" | | "Rajon Rondo" | "Kings" | | "Rudy Gay" | "Kings" | | "Vince Carter" | "Kings" | | "Aron Baynes" | "Celtics" | - | "Shaquile O'Neal" | "Celtics" | + | "Shaquille O'Neal" | "Celtics" | | "Kyrie Irving" | "Celtics" | | "Rajon Rondo" | "Celtics" | | "Ray Allen" | "Celtics" | @@ -786,12 +786,12 @@ Feature: Match seek by edge | "Yao Ming" | "Tracy McGrady" | "Magic" | | "Yao Ming" | "Tracy McGrady" | "Rockets" | | "Yao Ming" | "Tracy McGrady" | "Raptors" | - | "Yao Ming" | "Shaquile O'Neal" | "Suns" | - | "Yao Ming" | "Shaquile O'Neal" | "Celtics" | - | "Yao Ming" | "Shaquile O'Neal" | "Heat" | - | "Yao Ming" | "Shaquile O'Neal" | "Magic" | - | "Yao Ming" | "Shaquile O'Neal" | "Cavaliers" | - | "Yao Ming" | "Shaquile O'Neal" | "Lakers" | + | "Yao Ming" | "Shaquille O'Neal" | "Suns" | + | "Yao Ming" | "Shaquille O'Neal" | "Celtics" | + | "Yao Ming" | "Shaquille O'Neal" | "Heat" | + | "Yao Ming" | "Shaquille O'Neal" | "Magic" | + | "Yao Ming" | "Shaquille O'Neal" | "Cavaliers" | + | "Yao Ming" | "Shaquille O'Neal" | "Lakers" | | "Dejounte Murray" | "Chris Paul" | "Hornets" | | "Dejounte Murray" | "Chris Paul" | "Clippers" | | "Dejounte Murray" | "Chris Paul" | "Rockets" | @@ -867,12 +867,12 @@ Feature: Match seek by edge | "Grant Hill" | "Tracy McGrady" | "Magic" | | "Grant Hill" | "Tracy McGrady" | "Rockets" | | "Grant Hill" | "Tracy McGrady" | "Raptors" | - | "Shaquile O'Neal" | "Tim Duncan" | "Spurs" | - | "Shaquile O'Neal" | "JaVale McGee" | "Lakers" | - | "Shaquile O'Neal" | "JaVale McGee" | "Warriors" | - | "Shaquile O'Neal" | "JaVale McGee" | "Wizards" | - | "Shaquile 
O'Neal" | "JaVale McGee" | "Nuggets" | - | "Shaquile O'Neal" | "JaVale McGee" | "Mavericks" | + | "Shaquille O'Neal" | "Tim Duncan" | "Spurs" | + | "Shaquille O'Neal" | "JaVale McGee" | "Lakers" | + | "Shaquille O'Neal" | "JaVale McGee" | "Warriors" | + | "Shaquille O'Neal" | "JaVale McGee" | "Wizards" | + | "Shaquille O'Neal" | "JaVale McGee" | "Nuggets" | + | "Shaquille O'Neal" | "JaVale McGee" | "Mavericks" | Scenario Outline: Seek by edge with range When executing query: @@ -1037,8 +1037,8 @@ Feature: Match seek by edge | "Grant Hill" | "Kobe Bryant" | | "Grant Hill" | "Grant Hill" | | "Grant Hill" | "Rudy Gay" | - | "Shaquile O'Neal" | "Tony Parker" | - | "Shaquile O'Neal" | "Manu Ginobili" | + | "Shaquille O'Neal" | "Tony Parker" | + | "Shaquille O'Neal" | "Manu Ginobili" | When executing query: """ match (p1)-[:like*1..2]->(p2) return p1.name, p2.name @@ -1196,7 +1196,7 @@ Feature: Match seek by edge | "Damian Lillard" | "LaMarcus Aldridge" | | "Damian Lillard" | "Tony Parker" | | "Damian Lillard" | "Tim Duncan" | - | "Yao Ming" | "Shaquile O'Neal" | + | "Yao Ming" | "Shaquille O'Neal" | | "Yao Ming" | "JaVale McGee" | | "Yao Ming" | "Tim Duncan" | | "Yao Ming" | "Tracy McGrady" | @@ -1280,10 +1280,10 @@ Feature: Match seek by edge | "Grant Hill" | "Kobe Bryant" | | "Grant Hill" | "Grant Hill" | | "Grant Hill" | "Rudy Gay" | - | "Shaquile O'Neal" | "JaVale McGee" | - | "Shaquile O'Neal" | "Tim Duncan" | - | "Shaquile O'Neal" | "Tony Parker" | - | "Shaquile O'Neal" | "Manu Ginobili" | + | "Shaquille O'Neal" | "JaVale McGee" | + | "Shaquille O'Neal" | "Tim Duncan" | + | "Shaquille O'Neal" | "Tony Parker" | + | "Shaquille O'Neal" | "Manu Ginobili" | When executing query: """ match (p1)-[:serve*2]->(p2) return p1.name, p2.name @@ -1447,7 +1447,7 @@ Feature: Match seek by edge | "Dwyane Wade" | "Chris Paul" | | "Dwyane Wade" | "Dwyane Wade" | | "Dwyane Wade" | "LeBron James" | - | "Yao Ming" | "Shaquile O'Neal" | + | "Yao Ming" | "Shaquille O'Neal" | | "Yao Ming" 
| "Tracy McGrady" | | "Yao Ming" | "Kobe Bryant" | | "Yao Ming" | "Grant Hill" | diff --git a/tests/tck/features/match/VariableLengthPattern.feature b/tests/tck/features/match/VariableLengthPattern.feature index 74adff6f9bf..64f87063e60 100644 --- a/tests/tck/features/match/VariableLengthPattern.feature +++ b/tests/tck/features/match/VariableLengthPattern.feature @@ -257,7 +257,7 @@ Feature: Variable length Pattern match (m to n) | [[:like "Tim Duncan"->"Manu Ginobili"]] | | [[:like "Tim Duncan"->"Tony Parker"]] | | [[:like "Tim Duncan"<-"Dejounte Murray"]] | - | [[:like "Tim Duncan"<-"Shaquile O'Neal"]] | + | [[:like "Tim Duncan"<-"Shaquille O'Neal"]] | | [[:like "Tim Duncan"<-"Marco Belinelli"]] | | [[:like "Tim Duncan"<-"Boris Diaw"]] | | [[:like "Tim Duncan"<-"Manu Ginobili"]] | @@ -278,7 +278,7 @@ Feature: Variable length Pattern match (m to n) | [[:like "Tim Duncan"->"Manu Ginobili"]] | | [[:like "Tim Duncan"->"Tony Parker"]] | | [[:like "Tim Duncan"<-"Dejounte Murray"]] | - | [[:like "Tim Duncan"<-"Shaquile O'Neal"]] | + | [[:like "Tim Duncan"<-"Shaquille O'Neal"]] | | [[:like "Tim Duncan"<-"Marco Belinelli"]] | | [[:like "Tim Duncan"<-"Boris Diaw"]] | | [[:like "Tim Duncan"<-"Manu Ginobili"]] | diff --git a/tests/tck/features/match/VariableLengthPattern.intVid.feature b/tests/tck/features/match/VariableLengthPattern.intVid.feature index 1b2b6a67e7b..33719af1ed7 100644 --- a/tests/tck/features/match/VariableLengthPattern.intVid.feature +++ b/tests/tck/features/match/VariableLengthPattern.intVid.feature @@ -257,7 +257,7 @@ Feature: Integer Vid Variable length Pattern match (m to n) | [[:like "Tim Duncan"->"Manu Ginobili"]] | | [[:like "Tim Duncan"->"Tony Parker"]] | | [[:like "Tim Duncan"<-"Dejounte Murray"]] | - | [[:like "Tim Duncan"<-"Shaquile O'Neal"]] | + | [[:like "Tim Duncan"<-"Shaquille O'Neal"]] | | [[:like "Tim Duncan"<-"Marco Belinelli"]] | | [[:like "Tim Duncan"<-"Boris Diaw"]] | | [[:like "Tim Duncan"<-"Manu Ginobili"]] | @@ -278,7 +278,7 @@ 
Feature: Integer Vid Variable length Pattern match (m to n) | [[:like "Tim Duncan"->"Manu Ginobili"]] | | [[:like "Tim Duncan"->"Tony Parker"]] | | [[:like "Tim Duncan"<-"Dejounte Murray"]] | - | [[:like "Tim Duncan"<-"Shaquile O'Neal"]] | + | [[:like "Tim Duncan"<-"Shaquille O'Neal"]] | | [[:like "Tim Duncan"<-"Marco Belinelli"]] | | [[:like "Tim Duncan"<-"Boris Diaw"]] | | [[:like "Tim Duncan"<-"Manu Ginobili"]] | diff --git a/tests/tck/features/match/With.feature b/tests/tck/features/match/With.feature index 0cccf1132a3..57a9de0c5b7 100644 --- a/tests/tck/features/match/With.feature +++ b/tests/tck/features/match/With.feature @@ -92,8 +92,8 @@ Feature: With clause RETURN collect(names) """ Then the result should be, in any order, with relax comparison: - | collect(names) | - | ["Tony Parker", "Tiago Splitter", "Spurs", "Shaquile O'Neal", "Marco Belinelli"] | + | collect(names) | + | ["Tony Parker", "Tiago Splitter", "Spurs", "Shaquille O'Neal", "Marco Belinelli"] | When profiling query: """ MATCH (v:player) @@ -105,7 +105,7 @@ Feature: With clause """ Then the result should be, in order, with relax comparison: | v | age | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | 47 | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | 47 | | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | 46 | | ("Jason Kidd" :player{age: 45, name: "Jason Kidd"}) | 45 | | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | 45 | diff --git a/tests/tck/features/match/ZeroStep.feature b/tests/tck/features/match/ZeroStep.feature index 782c079d06b..cbd0ba0a6f0 100644 --- a/tests/tck/features/match/ZeroStep.feature +++ b/tests/tck/features/match/ZeroStep.feature @@ -41,7 +41,7 @@ Feature: Variable length Pattern match (0 step) | [[:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}]] | | [[:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}]] | | [[:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}]] | - | [[:like "Shaquile 
O'Neal"->"Tim Duncan" @0 {likeness: 80}]] | + | [[:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}]] | | [[:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}]] | | [[:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}]] | | [[:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}]] | @@ -118,7 +118,7 @@ Feature: Variable length Pattern match (0 step) | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Grant Hill"->"Tracy McGrady" @0 {likeness: 90}]] | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Vince Carter"->"Tracy McGrady" @0 {likeness: 90}]] | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | | [[:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}], [:like "Tiago Splitter"->"Manu Ginobili" @0 {likeness: 90}]] | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | - | [[:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}], [:like "Yao Ming"->"Shaquile O'Neal" @0 {likeness: 90}]] | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | [[:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}], [:like "Yao Ming"->"Shaquille O'Neal" @0 {likeness: 90}]] | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | [[:like "Tracy McGrady"->"Grant Hill" @0 {likeness: 90}], [:like "Grant Hill"->"Tracy McGrady" @0 {likeness: 90}]] | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}]] | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | | [[:like "Tiago Splitter"->"Manu Ginobili" @0 {likeness: 90}], [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}]] | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | @@ -271,14 +271,14 @@ Feature: Variable length Pattern match (0 step) | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" 
:bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: 
"psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | diff --git a/tests/tck/features/match/ZeroStep.intVid.feature b/tests/tck/features/match/ZeroStep.intVid.feature index bfedac3d8f3..457fda55f0e 100644 --- 
a/tests/tck/features/match/ZeroStep.intVid.feature +++ b/tests/tck/features/match/ZeroStep.intVid.feature @@ -41,7 +41,7 @@ Feature: Variable length Pattern match int vid (0 step) | [[:like "LaMarcus Aldridge"->"Tim Duncan" @0 {likeness: 75}]] | | [[:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}]] | | [[:like "Marco Belinelli"->"Tim Duncan" @0 {likeness: 55}]] | - | [[:like "Shaquile O'Neal"->"Tim Duncan" @0 {likeness: 80}]] | + | [[:like "Shaquille O'Neal"->"Tim Duncan" @0 {likeness: 80}]] | | [[:like "Tiago Splitter"->"Tim Duncan" @0 {likeness: 80}]] | | [[:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}]] | | [[:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}]] | @@ -118,7 +118,7 @@ Feature: Variable length Pattern match int vid (0 step) | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Grant Hill"->"Tracy McGrady" @0 {likeness: 90}]] | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Vince Carter"->"Tracy McGrady" @0 {likeness: 90}]] | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | | [[:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}], [:like "Tiago Splitter"->"Manu Ginobili" @0 {likeness: 90}]] | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | - | [[:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}], [:like "Yao Ming"->"Shaquile O'Neal" @0 {likeness: 90}]] | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | [[:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}], [:like "Yao Ming"->"Shaquille O'Neal" @0 {likeness: 90}]] | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | [[:like "Tracy McGrady"->"Grant Hill" @0 {likeness: 90}], [:like "Grant Hill"->"Tracy McGrady" @0 {likeness: 90}]] | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | | [[:like "Tracy McGrady"->"Rudy Gay" @0 {likeness: 90}], [:like "Yao Ming"->"Tracy McGrady" @0 {likeness: 90}]] | ("Yao Ming" :player{age: 38, name: 
"Yao Ming"}) | | [[:like "Tiago Splitter"->"Manu Ginobili" @0 {likeness: 90}], [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}]] | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | @@ -271,14 +271,14 @@ Feature: Variable length Pattern match int vid (0 step) | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Marco Belinelli" :player{age: 32, name: "Marco Belinelli"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim 
Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim 
Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | | ("Tiago Splitter" :player{age: 34, name: "Tiago Splitter"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | diff --git a/tests/tck/features/optimizer/IndexScanRule.feature b/tests/tck/features/optimizer/IndexScanRule.feature index 8f8e5be6d84..7a5bf6e5d71 100644 --- a/tests/tck/features/optimizer/IndexScanRule.feature +++ b/tests/tck/features/optimizer/IndexScanRule.feature @@ -6,7 +6,7 @@ Feature: Match index selection Background: Given a graph with space named "nba" - Scenario: and filter embeding + Scenario: and filter embedding When profiling query: """ MATCH (v:player) @@ -66,7 +66,7 @@ Feature: Match index selection | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE","column":"age","beginValue":"30","endValue":"40","includeBegin":"false","includeEnd":"true"}}} | | 0 | Start | | | - Scenario: or filter embeding + Scenario: or filter embedding When profiling query: """ MATCH (v:player) @@ -89,7 +89,7 @@ Feature: Match index selection | ("Vince Carter" :player{age: 42, name: "Vince Carter"}) | | ("Ray Allen" :player{age: 43, name: "Ray Allen"}) | | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | - | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | And the execution plan should be: | id | name | dependencies | operator info | | 6 | Project | 2 | | @@ -110,7 +110,7 @@ Feature: Match index selection | v | n | | ("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"}) | ("Grant Hill" :player{age: 46, name: "Grant Hill"}) | | ("Amar'e Stoudemire" :player{age: 36, name: "Amar'e Stoudemire"}) | ("Steve Nash" :player{age: 45, name: "Steve Nash"}) | - | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | + | 
("Yao Ming" :player{age: 38, name: "Yao Ming"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | | ("Aron Baynes" :player{age: 32, name: "Aron Baynes"}) | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | And the execution plan should be: | id | name | dependencies | operator info | diff --git a/tests/tck/features/optimizer/PushFilterDownProjectRule.feature b/tests/tck/features/optimizer/PushFilterDownProjectRule.feature index 8dc91d38b33..6b54e7c644f 100644 --- a/tests/tck/features/optimizer/PushFilterDownProjectRule.feature +++ b/tests/tck/features/optimizer/PushFilterDownProjectRule.feature @@ -50,15 +50,15 @@ Feature: Push Filter down Project rule RETURN DISTINCT a, b, cage """ Then the result should be, in any order: - | a | b | cage | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | 39 | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | 32 | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"}) | NULL | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | NULL | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 43 | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 37 | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} 
:player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 35 | - | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 30 | + | a | b | cage | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | 39 | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | 32 | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"}) | NULL | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | NULL | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 43 | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 37 | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 35 | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}) | 30 | And the execution plan should be: | id | name | dependencies | operator info | | 25 | DataCollect | 24 | | diff --git a/tests/tck/features/path/AllPath.IntVid.feature b/tests/tck/features/path/AllPath.IntVid.feature index 
1fd77aabdaf..3f20e83bd1c 100644 --- a/tests/tck/features/path/AllPath.IntVid.feature +++ b/tests/tck/features/path/AllPath.IntVid.feature @@ -218,30 +218,30 @@ Feature: Integer Vid All Path WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | path | + | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM hash("Tony Parker") TO hash("Yao Ming") OVER * BIDIRECT WHERE teammate.start_year > 2000 OR (like.likeness is not EMPTY AND like.likeness >= 80) UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao 
Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2016, start_year: 2001}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | path | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2016, start_year: 2001}]->("Tim Duncan" 
:bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM hash("Yao Ming") TO hash("Danny Green") OVER * BIDIRECT WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:serve@0 {end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs" :team{name: "Spurs"})<-[:serve@0 {end_year: 2018, start_year: 2010}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | path | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, 
name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs" :team{name: "Spurs"})<-[:serve@0 {end_year: 2018, start_year: 2010}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | Scenario: Integer Vid Dangling edge Given an empty graph diff --git a/tests/tck/features/path/AllPath.feature b/tests/tck/features/path/AllPath.feature index cd188774c7e..a73a28fc1f7 100644 --- a/tests/tck/features/path/AllPath.feature +++ b/tests/tck/features/path/AllPath.feature @@ -218,30 +218,30 @@ Feature: All Path WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 
90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | path | + | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM "Tony Parker" TO "Yao Ming" OVER * BIDIRECT WHERE teammate.start_year > 2000 OR (like.likeness is not EMPTY AND like.likeness >= 80) UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2016, start_year: 2001}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquile O'Neal" :player{age: 
47, name: "Shaquile O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | path | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2016, start_year: 2001}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM "Yao Ming" TO "Danny Green" OVER * BIDIRECT WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:serve@0 
{end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | - | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs" :team{name: "Spurs"})<-[:serve@0 {end_year: 2018, start_year: 2010}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | path | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 
{likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | + | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs" :team{name: "Spurs"})<-[:serve@0 {end_year: 2018, start_year: 2010}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | Scenario: Dangling edge Given an empty graph diff --git a/tests/tck/features/path/ShortestPath.IntVid.feature b/tests/tck/features/path/ShortestPath.IntVid.feature index 701baf08f69..47f71eaef0d 100644 --- a/tests/tck/features/path/ShortestPath.IntVid.feature +++ b/tests/tck/features/path/ShortestPath.IntVid.feature @@ -94,10 +94,10 @@ Feature: Integer Vid Shortest Path """ Then the result should be, in any order, with relax comparison: | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Tony Parker")-[:like]->("Tim Duncan")-[:teammate]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Tony Parker")-[:teammate]->("Tim Duncan")-[:teammate]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -110,23 +110,23 @@ Feature: Integer Vid 
Shortest Path FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | - | <("Tony Parker")-[:serve]->("Spurs")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | <("Tony Parker")-[:serve]->("Spurs")> | When executing query: """ FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tim Duncan"), hash("Spurs"), hash("Lakers") OVER * UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | Scenario: Integer Vid [5] MultiPair Shortest Path When executing query: @@ -136,7 +136,7 @@ Feature: 
Integer Vid Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:serve]->("Spurs")> | | <("Marco Belinelli")-[:serve@1]->("Spurs")> | @@ -169,42 +169,42 @@ Feature: Integer Vid Shortest Path FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tony Parker"), hash("Tracy McGrady") OVER like,serve UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")> | Scenario: Integer Vid [9] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquile O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille 
O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid [10] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquile O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid [11] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquile O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER like UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: Integer Vid [12] MultiPair Shortest Path When executing query: @@ -232,30 +232,30 @@ Feature: Integer Vid Shortest Path | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile 
O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [2] MultiPair Shortest Path Run Time input When executing query: """ - YIELD hash("Shaquile O\'Neal") AS src + YIELD hash("Shaquille O\'Neal") AS src | FIND SHORTEST PATH FROM $-.src TO hash("Manu Ginobili") OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid [3] MultiPair Shortest Path Run Time input When executing query: """ YIELD hash("Manu Ginobili") AS dst - | FIND SHORTEST PATH FROM hash("Shaquile O\'Neal") TO $-.dst OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO $-.dst OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid [4] MultiPair Shortest Path Run Time input When executing query: @@ -266,7 +266,7 @@ Feature: Integer Vid Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Shaquille O'Neal")-[:like]->("Tim 
Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [5] MultiPair Shortest Path Run Time input When executing query: @@ -277,7 +277,7 @@ Feature: Integer Vid Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [6] MultiPair Shortest Path Run Time input When executing query: @@ -324,13 +324,13 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [2] Shortest Path With Limit When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquile O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS | ORDER BY $-.path | LIMIT 2 """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: Integer Vid [3] Shortest Path With Limit When executing query: @@ -413,13 +413,13 @@ Feature: Integer Vid Shortest Path FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | - | <("Tony 
Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid [3] Shortest Path BIDIRECT When executing query: @@ -427,25 +427,25 @@ Feature: Integer Vid Shortest Path FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")<-[:teammate]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:like]-("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")<-[:teammate]-("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:like]->("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:teammate]->("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")<-[:like]-("Dejounte Murray")-[:like]->("LeBron James")-[:serve]->("Lakers")> | - | <("Tony 
Parker")-[:serve]->("Spurs")<-[:serve]-("Paul Gasol")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Hornets")<-[:serve]-("Dwight Howard")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | - | <("Tony Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:teammate]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:like]-("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")<-[:teammate]-("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:like]->("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:teammate]->("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")<-[:like]-("Dejounte Murray")-[:like]->("LeBron James")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")<-[:serve]-("Paul Gasol")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Hornets")<-[:serve]-("Dwight Howard")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | Scenario: Integer Vid Shortest Path With PROP When executing 
query: @@ -467,13 +467,13 @@ Feature: Integer Vid Shortest Path FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal": player{age: 47, name: "Shaquile O'Neal"})-[:serve@0 {end_year: 2004,start_year: 1996}]->("Lakers": team{name: "Lakers"})> | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | path | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal": player{age: 47, name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004,start_year: 1996}]->("Lakers": team{name: "Lakers"})> | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 
36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | Scenario: Integer Vid Shortest Path With Filter When executing query: @@ -481,12 +481,12 @@ Feature: Integer Vid Shortest Path FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player {age: 47,name: "Shaquile O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | path | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 
1996}]->("Lakers": team{name: "Lakers"})> | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY WHERE like.likeness > 70 @@ -502,7 +502,7 @@ Feature: Integer Vid Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:like@0 {likeness: 90}]->("Rudy Gay" :player{age: 32, name: "Rudy Gay"})-[:like@0 {likeness: 70}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | - | <("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | + | <("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ FIND SHORTEST PATH WITH 
PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS diff --git a/tests/tck/features/path/ShortestPath.feature b/tests/tck/features/path/ShortestPath.feature index bffd7b3bcaa..55587e0d76a 100644 --- a/tests/tck/features/path/ShortestPath.feature +++ b/tests/tck/features/path/ShortestPath.feature @@ -94,10 +94,10 @@ Feature: Shortest Path """ Then the result should be, in any order, with relax comparison: | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Tony Parker")-[:like]->("Tim Duncan")-[:teammate]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Tony Parker")-[:teammate]->("Tim Duncan")-[:teammate]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -110,23 +110,23 @@ Feature: Shortest Path FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao 
Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | - | <("Tony Parker")-[:serve]->("Spurs")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | <("Tony Parker")-[:serve]->("Spurs")> | When executing query: """ FIND SHORTEST PATH FROM "Yao Ming" TO "Tim Duncan", "Spurs", "Lakers" OVER * UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | Scenario: [5] MultiPair Shortest Path When executing query: @@ -136,7 +136,7 @@ Feature: Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:serve]->("Spurs")> | | <("Marco Belinelli")-[:serve@1]->("Spurs")> | @@ -169,42 +169,42 @@ Feature: Shortest Path FIND SHORTEST PATH FROM 
"Yao Ming" TO "Tony Parker","Tracy McGrady" OVER like,serve UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")> | Scenario: [9] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquile O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: [10] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquile O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim 
Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: [11] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquile O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER like UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: [12] MultiPair Shortest Path When executing query: @@ -232,30 +232,30 @@ Feature: Shortest Path | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | path | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [2] MultiPair Shortest Path Run Time input When executing query: """ - YIELD "Shaquile O\'Neal" AS src + YIELD "Shaquille O\'Neal" AS src | FIND SHORTEST PATH FROM $-.src TO "Manu Ginobili" OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: [3] MultiPair 
Shortest Path Run Time input When executing query: """ YIELD "Manu Ginobili" AS dst - | FIND SHORTEST PATH FROM "Shaquile O\'Neal" TO $-.dst OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO $-.dst OVER * UPTO 5 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | Scenario: [4] MultiPair Shortest Path Run Time input When executing query: @@ -266,7 +266,7 @@ Feature: Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [5] MultiPair Shortest Path Run Time input When executing query: @@ -277,7 +277,7 @@ Feature: Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [6] MultiPair Shortest Path Run Time input When executing query: @@ -324,13 +324,13 @@ Feature: Shortest Path Scenario: [2] Shortest Path With Limit When executing query: """ - FIND SHORTEST PATH FROM "Shaquile O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS | ORDER BY $-.path | LIMIT 2 """ 
Then the result should be, in any order, with relax comparison: - | path | - | <("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | path | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: [3] Shortest Path With Limit When executing query: @@ -413,13 +413,13 @@ Feature: Shortest Path FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | - | <("Tony Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | Scenario: [3] Shortest Path BIDIRECT When executing query: @@ -427,25 +427,25 @@ Feature: Shortest Path FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 3 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile 
O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:like]->("Tim Duncan")<-[:teammate]-("Manu Ginobili")> | - | <("Yao Ming")-[:like]->("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:like]-("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")<-[:teammate]-("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:like]->("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:teammate]->("Tim Duncan")<-[:like]-("Shaquile O'Neal")-[:serve]->("Lakers")> | - | <("Tony Parker")<-[:like]-("Dejounte Murray")-[:like]->("LeBron James")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Spurs")<-[:serve]-("Paul Gasol")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Hornets")<-[:serve]-("Dwight Howard")-[:serve]->("Lakers")> | - | <("Tony Parker")-[:serve]->("Spurs")> | - | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | - | <("Tony Parker")-[:like]->("Manu Ginobili")> | - | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | + | path | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:teammate]-("Manu Ginobili")> | + | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:like]-("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony 
Parker")<-[:teammate]-("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:like]->("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:teammate]->("Tim Duncan")<-[:like]-("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Tony Parker")<-[:like]-("Dejounte Murray")-[:like]->("LeBron James")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")<-[:serve]-("Paul Gasol")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Hornets")<-[:serve]-("Dwight Howard")-[:serve]->("Lakers")> | + | <("Tony Parker")-[:serve]->("Spurs")> | + | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | + | <("Tony Parker")-[:like]->("Manu Ginobili")> | + | <("Tony Parker")-[:teammate]->("Manu Ginobili")> | Scenario: Shortest Path With PROP When executing query: @@ -467,13 +467,13 @@ Feature: Shortest Path FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player {age: 47,name: "Shaquile O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | 
<("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | path | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | Scenario: Shortest Path With Filter When executing query: @@ -481,12 +481,12 @@ Feature: Shortest Path FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS """ Then the result should be, in any order, with relax comparison: - | path | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquile O'Neal" :player {age: 47,name: "Shaquile O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | - | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, 
start_year: 2013}]->("Spurs": team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | - | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | path | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | + | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY WHERE like.likeness > 70 @@ -502,7 +502,7 @@ Feature: Shortest Path Then the result should be, in any order, with relax comparison: | path | | <("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:like@0 {likeness: 90}]->("Rudy Gay" :player{age: 32, name: "Rudy Gay"})-[:like@0 {likeness: 70}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 
75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | - | <("Shaquile O'Neal" :player{age: 47, name: "Shaquile O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | + | <("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS diff --git a/tests/tck/features/schema/Comment.feature b/tests/tck/features/schema/Comment.feature index ebb69b54b62..f877007f633 100644 --- a/tests/tck/features/schema/Comment.feature +++ b/tests/tck/features/schema/Comment.feature @@ -224,6 +224,6 @@ Feature: Schema Comment | "test_comment_edge_index" | 'CREATE EDGE INDEX `test_comment_edge_index` ON `test_comment_edge` (\n `name`(8)\n) comment = "The edge index of person name."' | Examples: - | tag_of_person_comment | tag_of_person_comment_modified | edge_of_person_comment | edge_of_person_comment_modified | - | "The tag of person infomation." | "The tag of person infomation modified." | "The edge of person information." | "The edge of person infomation modified." | - | "个人信息标签。" | "修改过的个人信息标签。" | "个人信息边。" | "修改过的个人信息边。" | + | tag_of_person_comment | tag_of_person_comment_modified | edge_of_person_comment | edge_of_person_comment_modified | + | "The tag of person information." | "The tag of person information modified." | "The edge of person information." | "The edge of person information modified." 
| + | "个人信息标签。" | "修改过的个人信息标签。" | "个人信息边。" | "修改过的个人信息边。" | diff --git a/tests/tck/features/schema/Schema.feature b/tests/tck/features/schema/Schema.feature index 2a65858dda1..252c3589bf6 100644 --- a/tests/tck/features/schema/Schema.feature +++ b/tests/tck/features/schema/Schema.feature @@ -270,7 +270,7 @@ Feature: Insert string vid of vertex and edge """ CREATE EDGE buy_type_mismatch(id int, time_ string DEFAULT 0) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # existent edge When executing query: """ @@ -661,37 +661,37 @@ Feature: Insert string vid of vertex and edge """ ALTER TAG tag_not_null_default1 ADD (col1 string DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with wrong type default value of timestamp when add When executing query: """ ALTER TAG tag_not_null_default1 ADD (col1 timestamp DEFAULT -10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with wrong type default value of float when add When executing query: """ ALTER TAG tag_not_null_default1 ADD (col1 float DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with wrong type default value of bool when add When executing query: """ ALTER TAG tag_not_null_default1 ADD (col1 bool DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with wrong type default value of int8 when add When executing query: """ ALTER TAG tag_not_null_default1 ADD (col1 int8 DEFAULT 10000) """ - Then a ExecutionError should be raised at runtime: Invalid parm! 
+ Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with wrong type default value of time when add When executing query: """ ALTER TAG tag_not_null_default1 ADD (col1 time DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter tag with out of rang string default value of fixed_string when add When executing query: """ @@ -711,44 +711,44 @@ Feature: Insert string vid of vertex and edge """ ALTER TAG tag_not_null_default1 CHANGE (name FIXED_STRING(10) DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with wrong type default value of string when add When executing query: """ CREATE EDGE edge_not_null_default1(name string NOT NULL DEFAULT "N/A"); ALTER EDGE edge_not_null_default1 ADD (col1 string DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with wrong type default value of timestamp when add When executing query: """ ALTER EDGE edge_not_null_default1 ADD (col1 timestamp DEFAULT -10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with wrong type default value of float when add When executing query: """ ALTER EDGE edge_not_null_default1 ADD (col1 float DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with wrong type default value of bool when add When executing query: """ ALTER EDGE edge_not_null_default1 ADD (col1 bool DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! 
# test alter edge with wrong type default value of int8 when add When executing query: """ ALTER EDGE edge_not_null_default1 ADD (col1 int8 DEFAULT 10000) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with wrong type default value of time when add When executing query: """ ALTER EDGE edge_not_null_default1 ADD (col1 time DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! # test alter edge with out of rang string default value of fixed_string when add When executing query: """ @@ -768,7 +768,7 @@ Feature: Insert string vid of vertex and edge """ ALTER EDGE edge_not_null_default1 CHANGE (name FIXED_STRING(10) DEFAULT 10) """ - Then a ExecutionError should be raised at runtime: Invalid parm! + Then a ExecutionError should be raised at runtime: Invalid param! When executing query: """ DROP SPACE issue2009; diff --git a/tests/tck/features/subgraph/subgraph.IntVid.feature b/tests/tck/features/subgraph/subgraph.IntVid.feature index d2bd7b278bb..5c510feec24 100644 --- a/tests/tck/features/subgraph/subgraph.IntVid.feature +++ b/tests/tck/features/subgraph/subgraph.IntVid.feature @@ -19,7 +19,7 @@ Feature: Integer Vid subgraph Then a SemanticError should be raised at runtime: `$a.id', not exist variable `a' When executing query: """ - GET SUBGRAPH WITH PROP FROM hash("Tim Duncan") YIELD vertexs + GET SUBGRAPH WITH PROP FROM hash("Tim Duncan") YIELD invalidColumn """ Then a SemanticError should be raised at runtime: Get Subgraph only support YIELD vertices OR edges When executing query: @@ -120,12 +120,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Boris 
Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | @@ -157,12 +157,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Pistons") | [:serve "LeBron James"->"Cavaliers"@1] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | ("Kings") | [:serve "Rudy Gay"->"Kings"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille 
O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | ("Jazz") | [:serve "Rudy Gay"->"Raptors"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Danny Green"->"Spurs"@0] | ("LeBron James") | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Paul Gasol") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("Kyle Anderson") | [:serve "LeBron James"->"Heat"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Paul Gasol"->"Bulls"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | ("Yao Ming") | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("James Harden") | [:like "Tracy McGrady"->"Rudy Gay"@0] | @@ -175,14 +175,14 @@ Feature: Integer Vid subgraph | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Heat") | [:serve "David West"->"Warriors"@0] | | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Lakers") | [:serve "Jonathon Simmons"->"76ers"@0] | | | | [:serve "Boris Diaw"->"Suns"@0] | ("Suns") | [:serve "Jonathon Simmons"->"Magic"@0] | - | | | [:like "Yao Ming"->"Shaquile O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | - | | | [:like "Shaquile 
O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | - | | | [:serve "Shaquile O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | - | | | [:serve "Shaquile O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | - | | | [:serve "Shaquile O\'Neal"->"Heat"@0] | ("Cory Joseph") | | - | | | [:serve "Shaquile O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | - | | | [:serve "Shaquile O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | - | | | [:serve "Shaquile O\'Neal"->"Suns"@0] | ("Bulls") | | + | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | + | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | + | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | + | | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | + | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | ("Cory Joseph") | | + | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | + | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | + | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | ("Bulls") | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Warriors") | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | @@ -240,14 +240,14 @@ Feature: Integer Vid subgraph | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Danny Green") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("Yao Ming") | | [:like "Danny Green"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Rudy Gay") | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | + | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquille O'Neal") | 
[:like "Marco Belinelli"->"Danny Green"@0] | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Danny Green"->"Marco Belinelli"@0] | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | @@ -274,7 +274,7 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Aron Baynes") | [:serve "Danny Green"->"Cavaliers"@0] | ("Bulls") | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Manu Ginobili") | [:serve "Danny Green"->"Raptors"@0] | ("Trail Blazers") | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Danny Green"->"Spurs"@0] | ("Celtics") | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Hawks") | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Spurs") | [:serve "Marco Belinelli"->"76ers"@0] | ("Warriors") | | | | | [:serve "Marco Belinelli"->"Bulls"@0] | 
("Cavaliers") | | @@ -305,13 +305,13 @@ Feature: Integer Vid subgraph | | | [:serve "Tiago Splitter"->"76ers"@0] | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Celtics"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Heat"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Celtics"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Heat"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | [:serve "Tony Parker"->"Hornets"@0] | | | | | | [:serve "Tony Parker"->"Spurs"@0] | | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | @@ -411,12 +411,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony 
Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -449,12 +449,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron 
Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -496,80 +496,80 @@ Feature: Integer Vid subgraph GET SUBGRAPH WITH PROP 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like 
"Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] 
| ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony 
Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | 
[:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant 
Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | 
[:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron 
James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like 
"Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Yao Ming")] | <[edge1]> | @@ -587,21 +587,21 @@ Feature: Integer Vid subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant 
Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve 
Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul 
Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille 
O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -659,8 +659,8 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | 
[:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like 
"Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -673,8 +673,8 @@ Feature: Integer Vid subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Tim Duncan")] | <[edge1]> | @@ -722,80 +722,80 @@ Feature: Integer Vid subgraph GET SUBGRAPH 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | 
("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | 
| [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate 
"Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] 
| | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe 
Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy 
McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny 
Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve 
"Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve 
"Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Yao Ming")] | <[edge1]> | @@ -813,21 +813,21 @@ Feature: Integer Vid subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim 
Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve 
"Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | 
[:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | 
[:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -885,8 +885,8 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | 
[:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -899,8 +899,8 @@ Feature: Integer Vid subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Tim Duncan")] | <[edge1]> | @@ -971,12 +971,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Aron 
Baynes"->"Spurs"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | @@ -1008,12 +1008,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Pistons") | [:serve "LeBron James"->"Cavaliers"@1] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | ("Kings") | [:serve "Rudy Gay"->"Kings"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | ("Jazz") | [:serve "Rudy Gay"->"Raptors"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Danny Green"->"Spurs"@0] | ("LeBron James") | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate 
"Tony Parker"->"Manu Ginobili"@0] | ("Paul Gasol") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("Kyle Anderson") | [:serve "LeBron James"->"Heat"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Paul Gasol"->"Bulls"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | ("Yao Ming") | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("James Harden") | [:like "Tracy McGrady"->"Rudy Gay"@0] | @@ -1026,14 +1026,14 @@ Feature: Integer Vid subgraph | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Heat") | [:serve "David West"->"Warriors"@0] | | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Lakers") | [:serve "Jonathon Simmons"->"76ers"@0] | | | | [:serve "Boris Diaw"->"Suns"@0] | ("Suns") | [:serve "Jonathon Simmons"->"Magic"@0] | - | | | [:like "Yao Ming"->"Shaquile O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | - | | | [:like "Shaquile O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | - | | | [:serve "Shaquile O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | - | | | [:serve "Shaquile O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | - | | | [:serve "Shaquile O\'Neal"->"Heat"@0] | ("Cory Joseph") | | - | | | [:serve "Shaquile O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | - | | | [:serve "Shaquile O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | - | | | 
[:serve "Shaquile O\'Neal"->"Suns"@0] | ("Bulls") | | + | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | + | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | + | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | + | | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | + | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | ("Cory Joseph") | | + | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | + | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | + | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | ("Bulls") | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Warriors") | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | @@ -1091,14 +1091,14 @@ Feature: Integer Vid subgraph | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Danny Green") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("Yao Ming") | | [:like "Danny Green"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Rudy Gay") | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | + | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Danny Green"->"Marco Belinelli"@0] | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte 
Murray"->"Manu Ginobili"@0] | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | @@ -1125,7 +1125,7 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Aron Baynes") | [:serve "Danny Green"->"Cavaliers"@0] | ("Bulls") | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Manu Ginobili") | [:serve "Danny Green"->"Raptors"@0] | ("Trail Blazers") | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Danny Green"->"Spurs"@0] | ("Celtics") | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Hawks") | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Spurs") | [:serve "Marco Belinelli"->"76ers"@0] | ("Warriors") | | | | | [:serve "Marco Belinelli"->"Bulls"@0] | ("Cavaliers") | | @@ -1156,13 +1156,13 @@ Feature: Integer Vid subgraph | | | [:serve "Tiago Splitter"->"76ers"@0] | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Celtics"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Heat"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | | 
| - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Celtics"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Heat"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | [:serve "Tony Parker"->"Hornets"@0] | | | | | | [:serve "Tony Parker"->"Spurs"@0] | | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | @@ -1262,12 +1262,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | 
[:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -1300,12 +1300,12 @@ Feature: Integer Vid subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -1347,80 +1347,80 @@ Feature: Integer Vid subgraph GET SUBGRAPH WITH PROP 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like YIELD VERTiCeS as nodes , EDgES as relationships """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | 
edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy 
McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim 
Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | 
[:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - 
| | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant 
Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve 
"Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | 
[:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron 
Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Yao Ming")] | <[edge1]> | @@ -1438,21 +1438,21 @@ Feature: Integer Vid subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony 
Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | 
[:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul 
Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille 
O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -1510,8 +1510,8 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like 
"Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1524,8 +1524,8 @@ Feature: Integer Vid subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile 
O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Tim Duncan")] | <[edge1]> | @@ -1573,80 +1573,80 @@ Feature: Integer Vid subgraph GET SUBGRAPH 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like YIELD VERTICES as nodes, EDGES as relationships """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | 
("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve 
Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like 
"Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron 
Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | 
("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like 
"Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony 
Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | 
| | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Yao Ming")] | <[edge1]> | @@ -1664,21 +1664,21 @@ Feature: Integer Vid subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | 
("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy 
Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve 
"Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon 
Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -1736,8 +1736,8 @@ Feature: Integer 
Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | 
[:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1750,8 +1750,8 @@ Feature: Integer Vid subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Tim Duncan")] | <[edge1]> | @@ -1792,11 +1792,11 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Manu Ginobili"@0] | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | [:like "Tony Parker"->"Manu Ginobili"@0] | - | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | + | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | @@ -1838,8 +1838,8 @@ Feature: Integer Vid subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris 
Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like 
"Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1852,8 +1852,8 @@ Feature: Integer Vid subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Tim Duncan")] | <[edge1]> | diff --git a/tests/tck/features/subgraph/subgraph.feature b/tests/tck/features/subgraph/subgraph.feature index 24250f98b9d..622b28b8fae 100644 --- a/tests/tck/features/subgraph/subgraph.feature +++ b/tests/tck/features/subgraph/subgraph.feature @@ -19,7 +19,7 @@ Feature: subgraph Then a SemanticError should be raised at runtime: `$a.id', not exist variable `a' When executing query: """ - GET SUBGRAPH WITH PROP FROM "Tim Duncan" YIELD vertexs + GET SUBGRAPH WITH PROP FROM "Tim Duncan" YIELD invalidColumn """ Then a SemanticError should be raised at runtime: Get Subgraph only support YIELD vertices OR edges When executing query: @@ -120,12 +120,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like 
"Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | @@ -157,12 +157,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Pistons") | [:serve "LeBron James"->"Cavaliers"@1] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | ("Kings") | [:serve "Rudy Gay"->"Kings"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | ("Jazz") | [:serve "Rudy Gay"->"Raptors"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve 
"Danny Green"->"Spurs"@0] | ("LeBron James") | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Paul Gasol") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("Kyle Anderson") | [:serve "LeBron James"->"Heat"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron James"->"Lakers"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Paul Gasol"->"Bulls"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | ("Yao Ming") | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("James Harden") | [:like "Tracy McGrady"->"Rudy Gay"@0] | @@ -175,14 +175,14 @@ Feature: subgraph | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Heat") | [:serve "David West"->"Warriors"@0] | | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Lakers") | [:serve "Jonathon Simmons"->"76ers"@0] | | | | [:serve "Boris Diaw"->"Suns"@0] | ("Suns") | [:serve "Jonathon Simmons"->"Magic"@0] | - | | | [:like "Yao Ming"->"Shaquile O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | - | | | [:like "Shaquile O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | - | | | [:serve "Shaquile O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | - | | | [:serve "Shaquile O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | - | | | [:serve "Shaquile O\'Neal"->"Heat"@0] | ("Cory Joseph") | | - | 
| | [:serve "Shaquile O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | - | | | [:serve "Shaquile O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | - | | | [:serve "Shaquile O\'Neal"->"Suns"@0] | ("Bulls") | | + | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | + | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | + | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | + | | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | ("JaVale McGee") | | + | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | ("Cory Joseph") | | + | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | + | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | + | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | ("Bulls") | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Warriors") | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | @@ -240,14 +240,14 @@ Feature: subgraph | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Danny Green") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("Yao Ming") | | [:like "Danny Green"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Rudy Gay") | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | + | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Danny Green"->"Marco Belinelli"@0] | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like 
"Dejounte Murray"->"Manu Ginobili"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | @@ -274,7 +274,7 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Aron Baynes") | [:serve "Danny Green"->"Cavaliers"@0] | ("Bulls") | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Manu Ginobili") | [:serve "Danny Green"->"Raptors"@0] | ("Trail Blazers") | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Danny Green"->"Spurs"@0] | ("Celtics") | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Hawks") | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Spurs") | [:serve "Marco Belinelli"->"76ers"@0] | ("Warriors") | | | | | [:serve "Marco Belinelli"->"Bulls"@0] | ("Cavaliers") | | @@ -305,13 +305,13 @@ Feature: subgraph | | | [:serve "Tiago Splitter"->"76ers"@0] | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Celtics"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Heat"@0] | | | - 
| | | [:serve "Shaquile O'Neal"->"Lakers"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | | | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Celtics"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Heat"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | | | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | [:serve "Tony Parker"->"Hornets"@0] | | | | | | [:serve "Tony Parker"->"Spurs"@0] | | | | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | @@ -411,12 +411,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago 
Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -449,12 +449,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -496,80 +496,80 @@ Feature: subgraph GET SUBGRAPH WITH PROP 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like YIELD VERTICES as nodes, EDGES as relationships """ Then define some list variables: - | edge1 | vertex2 | edge2 
| vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve 
"Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] 
| - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco 
Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | 
[:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille 
O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve 
"Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like 
"LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron 
Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Yao Ming")] | <[edge1]> | @@ -587,21 +587,21 @@ Feature: subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] 
| | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu 
Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon 
Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | 
("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -659,8 +659,8 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu 
Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -673,8 +673,8 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile 
O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Tim Duncan")] | <[edge1]> | @@ -722,80 +722,80 @@ Feature: subgraph GET SUBGRAPH 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like YIELD vertices as nodes, edges as relationships """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like 
"Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy 
McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like 
"Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve 
"Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail 
Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant 
Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu 
Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | 
+ | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Yao Ming")] | <[edge1]> | @@ -813,21 +813,21 @@ Feature: subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | 
[:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray 
Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale 
McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | 
| [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -885,8 +885,8 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | 
[:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] 
| | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -899,8 +899,8 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | nodes | relationships | | [("Tim Duncan")] | <[edge1]> | @@ -941,11 +941,11 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Manu Ginobili"@0] | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | [:like "Tony Parker"->"Manu Ginobili"@0] | - | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | + | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | @@ -987,8 +987,8 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane 
Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron 
James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1001,8 +1001,8 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | a | b | | [("Tim Duncan")] | <[edge1]> | @@ -1066,12 +1066,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like 
"Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -1104,12 +1104,12 @@ Feature: subgraph | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquile O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | + | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquile O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | + | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | @@ -1151,80 +1151,80 @@ Feature: subgraph GET SUBGRAPH WITH PROP 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile 
O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny 
Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron 
James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | 
| [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve 
"LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | 
("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | 
[:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony 
Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | 
| | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Yao Ming")] | <[edge1]> | @@ -1242,21 +1242,21 @@ Feature: subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve 
"Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake 
Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve 
"Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon 
Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -1314,8 +1314,8 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | 
("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1328,8 +1328,8 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like 
"Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Tim Duncan")] | <[edge1]> | @@ -1377,80 +1377,80 @@ Feature: subgraph GET SUBGRAPH 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like """ Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquile O'Neal") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquile O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | 
("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve 
Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | 
[:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | 
[:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | + | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | + | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | + | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | + | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | + | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte 
Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | + | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | + | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | + | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | + | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | + | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | + | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | + | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | + | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve 
"Steve Nash"->"Lakers"@0] | + | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | + | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | + | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | + | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | + | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | + | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | + | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | + | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | + | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | + | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | + | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | + | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | + | | | | | [:like "Marco 
Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | + | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | + | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | + | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | + | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | + | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | + | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | + | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | + | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | + | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | + | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | + | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | + | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | + | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | + | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | + | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | + | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | + | | | | | | | 
[:like "Dejounte Murray"->"Kevin Durant"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | + | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | + | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | + | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | + | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | + | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | + | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | + | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | + | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | + | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | + | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Yao Ming")] | <[edge1]> | @@ -1468,21 +1468,21 @@ Feature: subgraph | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve 
Nash"->"Mavericks"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | + | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | | [:like "Marco 
Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquile O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquile O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquile O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris 
Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquile O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquile O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquile O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquile O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | + | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | + | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | + | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | + | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale 
McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | + | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | + | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | + | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | + | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | + | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | @@ -1540,8 +1540,8 @@ Feature: subgraph | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | 
[:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquile O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquile O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | + | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | + | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane 
Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | @@ -1554,8 +1554,8 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquile O'Neal"@0] | | | | | | - | | | [:like "Shaquile O'Neal"->"JaVale McGee"@0] | | | | | | + | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | + | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | Then the result should be, in any order, with relax comparison: | _vertices | _edges | | [("Tim Duncan")] | <[edge1]> | diff --git a/tests/tck/features/update/Update.IntVid.feature b/tests/tck/features/update/Update.IntVid.feature index efa1149fb2a..a6d75cb8b1f 100644 --- a/tests/tck/features/update/Update.IntVid.feature +++ b/tests/tck/features/update/Update.IntVid.feature @@ -96,7 +96,7 @@ Feature: Update int vid of vertex and edge """ UPDATE VERTEX 101 SET course.credits = $^.course.credits + 1 - WHEN $^.course.name == "notexist" AND $^.course.credits > 2 + WHEN $^.course.name == "nonexistent" AND $^.course.credits > 2 YIELD $^.course.name AS Name, $^.course.credits AS Credits """ Then the result should be, in any order: diff --git a/tests/tck/features/update/Update.feature b/tests/tck/features/update/Update.feature index f77f39680a5..7a7f3407a45 100644 --- a/tests/tck/features/update/Update.feature +++ b/tests/tck/features/update/Update.feature @@ -98,7 +98,7 @@ Feature: Update string vid of vertex and edge """ UPDATE VERTEX "101" SET course.credits = $^.course.credits + 1 - WHEN $^.course.name == "notexist" AND $^.course.credits > 2 + WHEN $^.course.name == "nonexistent" AND $^.course.credits > 2 YIELD $^.course.name AS Name, $^.course.credits AS Credits """ Then the result should be, in any order: @@ -741,7 +741,7 @@ Feature: Update string vid of vertex and edge """ UPDATE VERTEX ON course "101" SET credits = credits + 1 - WHEN 
name == "notexist" AND credits > 2 + WHEN name == "nonexistent" AND credits > 2 YIELD name AS Name, credits AS Credits """ Then the result should be, in any order: diff --git a/tests/tck/features/verify_client_version/VerifyClientVersion.feature b/tests/tck/features/verify_client_version/VerifyClientVersion.feature index b454a32ae96..5021efc64bb 100644 --- a/tests/tck/features/verify_client_version/VerifyClientVersion.feature +++ b/tests/tck/features/verify_client_version/VerifyClientVersion.feature @@ -8,7 +8,7 @@ Feature: Verify client version When connecting the servers with a compatible client version Then the connection should be established - Scenario: incompactible version + Scenario: incompatible version Given nothing When connecting the servers with a client version of 100.0.0 Then the connection should be rejected diff --git a/tests/tck/job/Job.feature b/tests/tck/job/Job.feature index 1d683d55dad..cd41904c8bd 100644 --- a/tests/tck/job/Job.feature +++ b/tests/tck/job/Job.feature @@ -179,7 +179,7 @@ Feature: Submit job space requirements """ Then an ExecutionError should be raised at runtime:Job not in chosen space! 
- # This is skipped becuase it is hard to simulate the situation + # This is skipped because it is hard to simulate the situation # When executing query: # """ # RECOVER JOB; diff --git a/tests/tck/openCypher/features/expressions/map/Map1.feature b/tests/tck/openCypher/features/expressions/map/Map1.feature index ac7cc6d254a..3ca97a3271a 100644 --- a/tests/tck/openCypher/features/expressions/map/Map1.feature +++ b/tests/tck/openCypher/features/expressions/map/Map1.feature @@ -19,7 +19,7 @@ Feature: Map1 - Static value access @uncompatible Scenario: [2] Fail when performing property access on a non-map - # openCyter return : TypeError should be raised at runtime: PropertyAccessOnNonMap + # openCypher return : TypeError should be raised at runtime: PropertyAccessOnNonMap When executing query: """ WITH [{num: 0}, 1] AS list diff --git a/third-party/install-gcc.sh b/third-party/install-gcc.sh index b21509972fb..3e27aa49252 100755 --- a/third-party/install-gcc.sh +++ b/third-party/install-gcc.sh @@ -35,7 +35,7 @@ this_distro=$(lsb_release -si) this_libc_version=$(ldd --version | head -1 | cut -d ')' -f 2 | cut -d ' ' -f 2) hash wget &>/dev/null || { - echo "'wget' not fould, please install it first" 1>&2 + echo "'wget' not found, please install it first" 1>&2 exit 1 } diff --git a/third-party/install-third-party.sh b/third-party/install-third-party.sh index 2acc31c24c9..5ed1f5dbad7 100755 --- a/third-party/install-third-party.sh +++ b/third-party/install-third-party.sh @@ -37,7 +37,7 @@ this_gcc_version=$($cxx_cmd -dumpfullversion -dumpversion) this_abi_version=$($this_dir/cxx-compiler-abi-version.sh) hash wget &>/dev/null || { - echo "'wget' not fould, please install it first" 1>&2 + echo "'wget' not found, please install it first" 1>&2 exit 1 } From 21582a1aed4ff2af0455f20c5777d2c772db6494 Mon Sep 17 00:00:00 2001 From: Yee <2520865+yixinglu@users.noreply.github.com> Date: Thu, 18 Nov 2021 19:50:46 +0800 Subject: [PATCH 27/53] Trigger packaging action when pushing 
release branch (#3283) --- .github/workflows/nightly.yml | 2 +- .github/workflows/rc.yml | 109 ++++++++++++++++++++++++++++++++++ .github/workflows/release.yml | 86 +++++++-------------------- 3 files changed, 132 insertions(+), 65 deletions(-) create mode 100644 .github/workflows/rc.yml diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index d02a39a7b08..f5e7f5f3f19 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -54,7 +54,7 @@ jobs: endpoint: ${{ secrets.OSS_ENDPOINT }} bucket: nebula-graph asset-path: pkg-build/cpack_output - target-path: package/v2-nightly/${{ steps.vars.outputs.subdir }} + target-path: package/nightly/${{ steps.vars.outputs.subdir }} docker: name: build docker image diff --git a/.github/workflows/rc.yml b/.github/workflows/rc.yml new file mode 100644 index 00000000000..80da0b19ae5 --- /dev/null +++ b/.github/workflows/rc.yml @@ -0,0 +1,109 @@ +name: rc + +on: + push: + branches: + - 'v[0-9]+.*' + +concurrency: + group: rc + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + package: + name: build package + runs-on: [self-hosted, nebula] + strategy: + fail-fast: false + matrix: + os: + - ubuntu1604 + - ubuntu1804 + - ubuntu2004 + - centos7 + - centos8 + container: + image: vesoft/nebula-dev:${{ matrix.os }} + env: + BUILD_DIR: ./pkg-build + CPACK_DIR: ./pkg-build/cpack_output + SYMS_DIR: ./pkg-build/symbols + steps: + - uses: webiny/action-post-run@2.0.1 + with: + run: sh -c "find . 
-mindepth 1 -delete" + - uses: actions/checkout@v2 + - uses: ./.github/actions/tagname-action + id: tag + - name: package + run: ./package/package.sh -b ${{ steps.tag.outputs.tag }} -t RelWithDebInfo -r OFF -p ON -s TRUE + - name: output some vars + run: | + tar zcf ${{ env.CPACK_DIR }}/nebula-${{ steps.tag.outputs.tagnum }}.tar.gz --exclude=${{ env.BUILD_DIR }} ./* + find ${{ env.CPACK_DIR }} -type f \( -iname \*.deb -o -iname \*.rpm -o -iname \*.tar.gz \) -exec bash -c "sha256sum {} > {}.sha256sum.txt" \; + - uses: ./.github/actions/upload-to-oss-action + with: + key-id: ${{ secrets.OSS_ID }} + key-secret: ${{ secrets.OSS_SECRET }} + endpoint: ${{ secrets.OSS_ENDPOINT }} + bucket: nebula-graph + asset-path: ${{ env.CPACK_DIR }} + target-path: rc/${{ steps.tag.outputs.tagnum }} + - uses: ./.github/actions/upload-to-oss-action + with: + key-id: ${{ secrets.OSS_ID }} + key-secret: ${{ secrets.OSS_SECRET }} + endpoint: ${{ secrets.OSS_ENDPOINT }} + bucket: nebula-graph + asset-path: ${{ env.SYMS_DIR }} + target-path: rc/${{ steps.tag.outputs.tagnum }}/symbols + + docker_build: + name: docker-build + runs-on: [self-hosted, nebula] + strategy: + fail-fast: false + matrix: + service: + - graphd + - metad + - storaged + - tools + steps: + - uses: webiny/action-post-run@2.0.1 + with: + run: sh -c "find . 
-mindepth 1 -delete" + - uses: actions/checkout@v2 + - uses: ./.github/actions/tagname-action + id: tagname + - id: docker + run: | + majorver=$(git tag -l --sort=v:refname | tail -n1 | cut -f1 -d'.') + tag="" + if [[ $majorver == ${{ steps.tagname.outputs.majorver }} ]]; then + tag="${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}:latest" + fi + echo "::set-output name=tag::$tag" + - uses: docker/setup-qemu-action@v1 + - uses: docker/setup-buildx-action@v1 + - uses: docker/login-action@v1 + with: + registry: ${{ secrets.HARBOR_REGISTRY }} + username: ${{ secrets.HARBOR_USERNAME }} + password: ${{ secrets.HARBOR_PASSWORD }} + - uses: docker/build-push-action@v2 + with: + context: . + file: ./docker/Dockerfile.${{ matrix.service }} + platforms: linux/amd64,linux/arm64 + tags: | + ${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }} + ${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.majorver }} + ${{ steps.docker.outputs.tag }} + push: true + build-args: | + BRANCH=${{ steps.tagname.outputs.tag }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1e9e0893cc0..8cf7db89b47 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,54 +15,21 @@ defaults: jobs: package: - name: build package - runs-on: [self-hosted, nebula] - strategy: - fail-fast: false - matrix: - os: - - ubuntu1604 - - ubuntu1804 - - ubuntu2004 - - centos7 - - centos8 + name: package + runs-on: ubuntu-latest container: - image: vesoft/nebula-dev:${{ matrix.os }} - env: - BUILD_DIR: ./pkg-build - CPACK_DIR: ./pkg-build/cpack_output - SYMS_DIR: ./pkg-build/symbols + image: vesoft/nebula-dev:centos7 steps: - - uses: webiny/action-post-run@2.0.1 - with: - run: sh -c "find . 
-mindepth 1 -delete" - uses: actions/checkout@v2 - - name: Check License Header - uses: apache/skywalking-eyes@main - uses: ./.github/actions/tagname-action id: tag - - name: package - run: ./package/package.sh -b ${{ steps.tag.outputs.tag }} -t RelWithDebInfo -r OFF -p ON -s TRUE - - name: output some vars - run: | - tar zcf ${{ env.CPACK_DIR }}/nebula-${{ steps.tag.outputs.tagnum }}.tar.gz --exclude=${{ env.BUILD_DIR }} ./* - find ${{ env.CPACK_DIR }} -type f \( -iname \*.deb -o -iname \*.rpm -o -iname \*.tar.gz \) -exec bash -c "sha256sum {} > {}.sha256sum.txt" \; - - uses: ./.github/actions/upload-to-oss-action - with: - key-id: ${{ secrets.OSS_ID }} - key-secret: ${{ secrets.OSS_SECRET }} - endpoint: ${{ secrets.OSS_ENDPOINT }} - bucket: nebula-graph - asset-path: ${{ env.CPACK_DIR }} - target-path: package/${{ steps.tag.outputs.tagnum }} - - uses: ./.github/actions/upload-to-oss-action - with: - key-id: ${{ secrets.OSS_ID }} - key-secret: ${{ secrets.OSS_SECRET }} - endpoint: ${{ secrets.OSS_ENDPOINT }} - bucket: nebula-graph - asset-path: ${{ env.SYMS_DIR }} - target-path: package/${{ steps.tag.outputs.tagnum }}/symbols + - run: | + ossutil64 cp -rf \ + -i ${{ secrets.OSS_ID }} \ + -k ${{ secrets.OSS_SECRET }} \ + -e ${{ secrets.OSS_ENDPOINT }} \ + oss://nebula-graph/rc/${{ steps.tag.outputs.tagnum }} \ + oss://nebula-graph/package/${{ steps.tag.outputs.tagnum }} docker_build: name: docker-build @@ -80,8 +47,6 @@ jobs: with: run: sh -c "find . 
-mindepth 1 -delete" - uses: actions/checkout@v2 - - name: Check License Header - uses: apache/skywalking-eyes@main - uses: ./.github/actions/tagname-action id: tagname - id: docker @@ -89,24 +54,17 @@ jobs: majorver=$(git tag -l --sort=v:refname | tail -n1 | cut -f1 -d'.') tag="" if [[ $majorver == ${{ steps.tagname.outputs.majorver }} ]]; then - tag="vesoft/nebula-${{ matrix.service }}:latest" + tag="latest" fi echo "::set-output name=tag::$tag" - - uses: docker/setup-qemu-action@v1 - - uses: docker/setup-buildx-action@v1 - - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - uses: docker/build-push-action@v2 - with: - context: . - file: ./docker/Dockerfile.${{ matrix.service }} - platforms: linux/amd64,linux/arm64 - tags: | - vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.tag }} - vesoft/nebula-${{ matrix.service }}:${{ steps.tagname.outputs.majorver }} - ${{ steps.docker.outputs.tag }} - push: true - build-args: | - BRANCH=${{ steps.tagname.outputs.tag }} + - name: Sync docker images + env: + FROM_IMAGE: docker://${{ secrets.HARBOR_REGISTRY }}/vesoft/nebula-${{ matrix.service }} + TO_IMAGE: docker://docker.io/vesoft/nebula-${{ matrix.service }} + CMD: docker run --rm -ti quay.io/containers/skopeo:v1.4.1 copy -a --src-creds ${{ secrets.HARBOR_USERNAME }}:${{ secrets.HARBOR_PASSWORD }} --dest-creds ${{ secrets.DOCKER_USERNAME }}:${{ secrets.DOCKER_PASSWORD }} + run: | + ${{ env.CMD }} ${{ env.FROM_IMAGE }}:${{ steps.tagname.outputs.tag }} ${{ env.TO_IMAGE }}:${{ steps.tagname.outputs.tag }} + ${{ env.CMD }} ${{ env.FROM_IMAGE }}:${{ steps.tagname.outputs.tag }} ${{ env.TO_IMAGE }}:${{ steps.tagname.outputs.majorver }} + if [[ ! 
-z "${{ steps.docker.outputs.tag }}" ]]; then + ${{ env.CMD }} ${{ env.FROM_IMAGE }}:${{ steps.tagname.outputs.tag }} ${{ env.TO_IMAGE }}:${{ steps.docker.outputs.tag }} + fi From 8b6eb04cdb6bcd11bc2ab34a7d3f1322b6dd82d8 Mon Sep 17 00:00:00 2001 From: cpw <13495049+CPWstatic@users.noreply.github.com> Date: Fri, 19 Nov 2021 11:45:12 +0800 Subject: [PATCH 28/53] Using int64 to present graph execution latency. (#2858) --- src/interface/graph.thrift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/interface/graph.thrift b/src/interface/graph.thrift index 9df520a450a..8a70850fa09 100644 --- a/src/interface/graph.thrift +++ b/src/interface/graph.thrift @@ -80,7 +80,7 @@ struct PlanDescription { struct ExecutionResponse { 1: required common.ErrorCode error_code; - 2: required i32 latency_in_us; // Execution time on server + 2: required i64 latency_in_us; // Execution time on server 3: optional common.DataSet data; 4: optional binary space_name; 5: optional binary error_msg; From 4dd4a264d52160616f0314a9d3bbb187f96c5df9 Mon Sep 17 00:00:00 2001 From: "hs.zhang" <22708345+cangfengzhs@users.noreply.github.com> Date: Fri, 19 Nov 2021 14:53:52 +0800 Subject: [PATCH 29/53] fix issue 3317 (#3325) --- src/storage/exec/IndexEdgeScanNode.cpp | 13 +++++++++---- src/storage/exec/IndexVertexScanNode.cpp | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/storage/exec/IndexEdgeScanNode.cpp b/src/storage/exec/IndexEdgeScanNode.cpp index e9242e6e556..ae222323581 100644 --- a/src/storage/exec/IndexEdgeScanNode.cpp +++ b/src/storage/exec/IndexEdgeScanNode.cpp @@ -119,11 +119,16 @@ Map IndexEdgeScanNode::decodeFromBase(const std::string& key values[col] = Value(NebulaKeyUtils::getRank(context_->vIdLen(), key)); } break; case QueryUtils::ReturnColType::kOther: { - auto retVal = QueryUtils::readValue(reader.get(), col, edge_.back()->field(col)); - if (!retVal.ok()) { - LOG(FATAL) << "Bad value for field" << col; + auto field = 
edge_.back()->field(col); + if (field == nullptr) { + values[col] = Value::kNullUnknownProp; + } else { + auto retVal = QueryUtils::readValue(reader.get(), col, field); + if (!retVal.ok()) { + LOG(FATAL) << "Bad value for field" << col; + } + values[col] = std::move(retVal.value()); } - values[col] = std::move(retVal.value()); } break; default: LOG(FATAL) << "Unexpect column name:" << col; diff --git a/src/storage/exec/IndexVertexScanNode.cpp b/src/storage/exec/IndexVertexScanNode.cpp index a2a61c42f75..44ba4bde03b 100644 --- a/src/storage/exec/IndexVertexScanNode.cpp +++ b/src/storage/exec/IndexVertexScanNode.cpp @@ -96,11 +96,16 @@ Map IndexVertexScanNode::decodeFromBase(const std::string& k values[col] = Value(context_->tagId_); } break; case QueryUtils::ReturnColType::kOther: { - auto retVal = QueryUtils::readValue(reader.get(), col, tag_.back()->field(col)); - if (!retVal.ok()) { - LOG(FATAL) << "Bad value for field" << col; + auto field = tag_.back()->field(col); + if (field == nullptr) { + values[col] = Value::kNullUnknownProp; + } else { + auto retVal = QueryUtils::readValue(reader.get(), col, field); + if (!retVal.ok()) { + LOG(FATAL) << "Bad value for field" << col; + } + values[col] = std::move(retVal.value()); } - values[col] = std::move(retVal.value()); } break; default: LOG(FATAL) << "Unexpect column name:" << col; From c50145d19b6e48799ce524eb1fc361817ef781a2 Mon Sep 17 00:00:00 2001 From: Shylock Hg <33566796+Shylock-Hg@users.noreply.github.com> Date: Mon, 22 Nov 2021 11:05:59 +0800 Subject: [PATCH 30/53] Filter the label by tag property for more friendly to push down (#3334) * Filter the label by tag property for more friendly to push down. * Revert TODO. 
--- src/graph/planner/match/MatchClausePlanner.cpp | 18 ++++++++++-------- src/graph/validator/MatchValidator.cpp | 9 +++------ 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/graph/planner/match/MatchClausePlanner.cpp b/src/graph/planner/match/MatchClausePlanner.cpp index 7ff42c9914b..ec8fa3c2790 100644 --- a/src/graph/planner/match/MatchClausePlanner.cpp +++ b/src/graph/planner/match/MatchClausePlanner.cpp @@ -45,14 +45,12 @@ static Expression* genVertexFilter(const NodeInfo& node) { return node.filter; } static Expression* genEdgeFilter(const EdgeInfo& edge) { return edge.filter; } -static std::unique_ptr> genVertexProps(const NodeInfo& node, - QueryContext* qctx, - GraphSpaceID spaceId) { +static StatusOr>> genVertexProps(const NodeInfo& node, + QueryContext* qctx, + GraphSpaceID spaceId) { // TODO UNUSED(node); - UNUSED(qctx); - UNUSED(spaceId); - return std::make_unique>(); + return SchemaUtil::getAllVertexProp(qctx, spaceId, true); } static std::unique_ptr> genEdgeProps(const EdgeInfo& edge, @@ -239,7 +237,9 @@ Status MatchClausePlanner::leftExpandFromNode(const std::vector& nodeI auto& edge = edgeInfos[i - 1]; auto traverse = Traverse::make(qctx, subplan.root, spaceId); traverse->setSrc(nextTraverseStart); - traverse->setVertexProps(genVertexProps(node, qctx, spaceId)); + auto vertexProps = genVertexProps(node, qctx, spaceId); + NG_RETURN_IF_ERROR(vertexProps); + traverse->setVertexProps(std::move(vertexProps).value()); traverse->setEdgeProps(genEdgeProps(edge, reversely, qctx, spaceId)); traverse->setVertexFilter(genVertexFilter(node)); traverse->setEdgeFilter(genEdgeFilter(edge)); @@ -283,7 +283,9 @@ Status MatchClausePlanner::rightExpandFromNode(const std::vector& node auto& edge = edgeInfos[i]; auto traverse = Traverse::make(qctx, subplan.root, spaceId); traverse->setSrc(nextTraverseStart); - traverse->setVertexProps(genVertexProps(node, qctx, spaceId)); + auto vertexProps = genVertexProps(node, qctx, spaceId); + 
NG_RETURN_IF_ERROR(vertexProps); + traverse->setVertexProps(std::move(vertexProps).value()); traverse->setEdgeProps(genEdgeProps(edge, reversely, qctx, spaceId)); traverse->setVertexFilter(genVertexFilter(node)); traverse->setEdgeFilter(genEdgeFilter(edge)); diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index d3ca0b6f1cd..905ffe158d4 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -551,12 +551,9 @@ StatusOr MatchValidator::makeNodeSubFilter(const MapExpression *ma auto *pool = qctx_->objPool(); // Node has tag without property if (!label.empty() && map == nullptr) { - auto *left = ConstantExpression::make(pool, label); - - auto *args = ArgumentList::make(pool); - args->addArgument(VertexExpression::make(pool)); - auto *right = FunctionCallExpression::make(pool, "tags", args); - Expression *root = RelationalExpression::makeIn(pool, left, right); + // label._tag IS NOT EMPTY + auto *tagExpr = TagPropertyExpression::make(pool, label, kTag); + auto *root = UnaryExpression::makeIsNotEmpty(pool, tagExpr); return root; } From f449c17a42407f81a14956c74ab04ec776391aef Mon Sep 17 00:00:00 2001 From: Josh Soref <2119212+jsoref@users.noreply.github.com> Date: Mon, 22 Nov 2021 00:46:15 -0500 Subject: [PATCH 31/53] CI improvements (#3298) * Fix bashism * Report Gherkin diff * Add PHONY target for tests check-and-diff * Improve clang-format-diff handling Use GITHUB_BASE_REF because a PR can easily have more than one commit: https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables Switch CLANG_HOME handling to support debian/ubuntu installed clang-format or a user's installation at some other location. Install clang-format-10 if it isn't installed in CI (this is mostly for nektos/act where images might be thinner than the GitHub standard). 
* Get the base commit In order to see what changes this PR is making, we need to get the base commit. * Use actions/checkout to get base and head Co-authored-by: Josh Soref Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> Co-authored-by: kyle.cao --- .github/workflows/pull_request.yml | 16 +++++++++++----- .linters/cpp/hooks/pre-commit.sh | 30 ++++++++++++++++++++++++++---- tests/Makefile | 9 +++++++-- 3 files changed, 44 insertions(+), 11 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 73b00e144f0..7757ba606d2 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -23,24 +23,30 @@ jobs: steps: - uses: actions/checkout@v2 with: - fetch-depth: 2 + ref: ${{ github.event.pull_request.base.sha }} + - uses: actions/checkout@v2 + with: + clean: false - name: Check License Header uses: apache/skywalking-eyes@main + - name: Ensure clang-format-10 is available + run: | + command -v clang-format-10 > /dev/null || (apt-get update && apt-get install -y clang-format-10) - name: Cpplint run: | ln -snf $PWD/.linters/cpp/hooks/pre-commit.sh $PWD/.linters/cpp/pre-commit.sh - .linters/cpp/pre-commit.sh $(git --no-pager diff --diff-filter=d --name-only HEAD^ HEAD) + .linters/cpp/pre-commit.sh $(git --no-pager diff --diff-filter=d --name-only ${{ github.event.pull_request.base.sha }} HEAD) - name: Format check run: | - res=$(git diff -U0 --no-color HEAD^ | /usr/share/clang/clang-format-10/clang-format-diff.py -p1) - [[ ! 
-z "$res" ]] && exit 1 || true + git diff -U0 --no-color ${{ github.event.pull_request.base.sha }} HEAD | /usr/share/clang/clang-format-10/clang-format-diff.py -p1 | tee /tmp/.clang-format-diff + [ -s /tmp/.clang-format-diff ] && exit 1 || true - uses: actions/setup-python@v2 with: python-version: 3.7 - name: Prepare Gherkin exec environ run: make init-all -C tests - name: Check Gherkin feature format - run: make check -C tests + run: make check-and-diff -C tests build: name: build diff --git a/.linters/cpp/hooks/pre-commit.sh b/.linters/cpp/hooks/pre-commit.sh index 215762d7543..fe289cf68da 100755 --- a/.linters/cpp/hooks/pre-commit.sh +++ b/.linters/cpp/hooks/pre-commit.sh @@ -56,12 +56,34 @@ fi echo "Performing C++ code format check..." -CLANG_HOME=/opt/vesoft/toolset/clang/10.0.0/ +CLANG_FALLBACK=/opt/vesoft/toolset/clang/10.0.0/ +if [ -z "$CLANG_HOME" ] && [ -d "$CLANG_FALLBACK" ]; then + CLANG_HOME=$CLANG_FALLBACK +fi + +CLANG_FORMAT=$(command -v clang-format-10) +if [ -z "$CLANG_FORMAT" ]; then + CLANG_FORMAT=$CLANG_HOME/bin/clang-format +fi + +CLANG_FORMAT_DIFF=$(command -v clang-format-diff-10) +if [ -z "$CLANG_FORMAT_DIFF" ]; then + CLANG_FORMAT_DIFF=$CLANG_HOME/share/clang/clang-format-diff.py +fi -if [ ! -d "$CLANG_HOME" ]; then - echo "The $CLANG_HOME directory is not found, and the source changes cannot be automatically formatted." +if [ -z "$CLANG_FORMAT" ] || [ -z "$CLANG_FORMAT_DIFF" ]; then + if [ ! -d "$CLANG_HOME" ]; then + echo "The $CLANG_HOME directory was not found." + fi + if [ -z "$CLANG_FORMAT" ]; then + echo "Could not find clang-format" + fi + if [ -z "$CLANG_FORMAT_DIFF" ]; then + echo "Could not find clang-format-diff" + fi + echo "source changes cannot be automatically formatted." 
exit 0 fi -git diff -U0 --no-color --staged | $CLANG_HOME/share/clang/clang-format-diff.py -i -p1 -binary $CLANG_HOME/bin/clang-format +git diff -U0 --no-color --staged | "$CLANG_FORMAT_DIFF" -i -p1 -binary "$CLANG_FORMAT" git add $CHECK_FILES diff --git a/tests/Makefile b/tests/Makefile index eec798a81bc..51ade78f0c8 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,7 +2,7 @@ # # This source code is licensed under Apache 2.0 License. -.PHONY: fmt check init init-all clean test tck fail up down +.PHONY: fmt check check-and-diff init init-all clean test tck fail up down PYPI_MIRROR = https://mirrors.aliyun.com/pypi/simple/ # PYPI_MIRROR = http://pypi.mirrors.ustc.edu.cn/simple --trusted-host pypi.mirrors.ustc.edu.cn @@ -37,7 +37,7 @@ install-nebula-py: install-deps rm -rf $(CURR_DIR)/nebula-python gherkin-fmt: install-deps - @if [[ $(PY_VERSION) -lt 7 ]]; then echo 'Python version must >= 3.7'; exit 1; fi + @if [ $(PY_VERSION) -lt 7 ]; then echo 'Python version must >= 3.7'; exit 1; fi pip3 install --user poetry git clone --branch master https://github.com/OneContainer/reformat-gherkin $(CURR_DIR)/reformat-gherkin cd $(CURR_DIR)/reformat-gherkin && python3 -m poetry build @@ -55,6 +55,11 @@ fmt: check: @find $(CURR_DIR)/tck/ -type f -iname "*.feature" -print | xargs $(gherkin_fmt) --check +check-and-diff: + @(find $(CURR_DIR)/tck/ -type f -iname '*.feature' -print | xargs $(gherkin_fmt)) 2>&1 | tee .gherkin_fmt + @git diff + @tail -1 .gherkin_fmt | grep -qv , + up: clean @mkdir -p $(CURR_DIR)/.pytest $(run_test) --cmd=start \ From b575189b9b8a1587a40ab11055c070cd5194a4a3 Mon Sep 17 00:00:00 2001 From: jimingquan Date: Mon, 22 Nov 2021 17:46:09 +0800 Subject: [PATCH 32/53] enhance match attribute filter (#3272) * enhance match attribute filter * add test case * fix test error * delete some null test * fix test error * fix storage crash --- src/graph/validator/MatchValidator.cpp | 58 ++++++++++------- src/graph/validator/MatchValidator.h | 13 ++-- 
src/graph/visitor/FoldConstantExprVisitor.cpp | 20 ++++-- .../visitor/test/FilterTransformTest.cpp | 12 ++-- src/parser/MatchSentence.h | 2 + src/storage/exec/IndexScanNode.cpp | 36 ++++++---- .../tck/features/expression/EndsWith.feature | 8 +-- tests/tck/features/expression/Null.feature | 24 +++---- .../features/expression/StartsWith.feature | 8 +-- .../expression/function/Mathematical.feature | 4 +- .../function/TypeConversion.feature | 45 ++++++++----- tests/tck/features/go/GO.feature | 4 +- tests/tck/features/lookup/ByIndex.feature | 4 +- .../features/lookup/ByIndex.intVid.feature | 4 +- tests/tck/features/match/Base.IntVid.feature | 64 +++++++++++++++++- tests/tck/features/match/Base.feature | 65 ++++++++++++++++++- tests/tck/features/schema/Schema.feature | 2 +- tests/tck/features/yield/yield.IntVid.feature | 18 ++--- tests/tck/features/yield/yield.feature | 18 ++--- 19 files changed, 281 insertions(+), 128 deletions(-) diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index 905ffe158d4..5789a41e593 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -112,8 +112,7 @@ Status MatchValidator::validateImpl() { return Status::OK(); } -Status MatchValidator::validatePath(const MatchPath *path, - MatchClauseContext &matchClauseCtx) const { +Status MatchValidator::validatePath(const MatchPath *path, MatchClauseContext &matchClauseCtx) { NG_RETURN_IF_ERROR( buildNodeInfo(path, matchClauseCtx.nodeInfos, matchClauseCtx.aliasesGenerated)); NG_RETURN_IF_ERROR( @@ -122,8 +121,7 @@ Status MatchValidator::validatePath(const MatchPath *path, return Status::OK(); } -Status MatchValidator::buildPathExpr(const MatchPath *path, - MatchClauseContext &matchClauseCtx) const { +Status MatchValidator::buildPathExpr(const MatchPath *path, MatchClauseContext &matchClauseCtx) { auto *pathAlias = path->alias(); if (pathAlias == nullptr) { return Status::OK(); @@ -148,7 +146,7 @@ Status 
MatchValidator::buildPathExpr(const MatchPath *path, Status MatchValidator::buildNodeInfo(const MatchPath *path, std::vector &nodeInfos, - std::unordered_map &aliases) const { + std::unordered_map &aliases) { auto *sm = qctx_->schemaMng(); auto steps = path->steps(); auto *pool = qctx_->objPool(); @@ -182,7 +180,7 @@ Status MatchValidator::buildNodeInfo(const MatchPath *path, } Expression *filter = nullptr; if (props != nullptr) { - auto result = makeNodeSubFilter(props, "*"); + auto result = makeNodeSubFilter(const_cast(props), "*"); NG_RETURN_IF_ERROR(result); filter = result.value(); } else if (node->labels() != nullptr && !node->labels()->labels().empty()) { @@ -204,7 +202,7 @@ Status MatchValidator::buildNodeInfo(const MatchPath *path, Status MatchValidator::buildEdgeInfo(const MatchPath *path, std::vector &edgeInfos, - std::unordered_map &aliases) const { + std::unordered_map &aliases) { auto *sm = qctx_->schemaMng(); auto steps = path->steps(); edgeInfos.resize(steps); @@ -250,7 +248,7 @@ Status MatchValidator::buildEdgeInfo(const MatchPath *path, } Expression *filter = nullptr; if (props != nullptr) { - auto result = makeEdgeSubFilter(props); + auto result = makeEdgeSubFilter(const_cast(props)); NG_RETURN_IF_ERROR(result); filter = result.value(); } @@ -521,32 +519,40 @@ Status MatchValidator::validateUnwind(const UnwindClause *unwindClause, return Status::OK(); } -StatusOr MatchValidator::makeEdgeSubFilter(const MapExpression *map) const { +StatusOr MatchValidator::makeEdgeSubFilter(MapExpression *map) const { auto *pool = qctx_->objPool(); DCHECK(map != nullptr); auto &items = map->items(); DCHECK(!items.empty()); - if (!ExpressionUtils::isEvaluableExpr(items[0].second)) { - return Status::SemanticError("Props must be constant: `%s'", + auto foldStatus = ExpressionUtils::foldConstantExpr(items[0].second); + NG_RETURN_IF_ERROR(foldStatus); + auto foldExpr = foldStatus.value(); + if (!ExpressionUtils::isEvaluableExpr(foldExpr)) { + return 
Status::SemanticError("Props must be evaluable: `%s'", items[0].second->toString().c_str()); } + map->setItem(0, std::make_pair(items[0].first, foldExpr)); Expression *root = RelationalExpression::makeEQ( - pool, EdgePropertyExpression::make(pool, "*", items[0].first), items[0].second->clone()); + pool, EdgePropertyExpression::make(pool, "*", items[0].first), foldExpr); for (auto i = 1u; i < items.size(); i++) { - if (!ExpressionUtils::isEvaluableExpr(items[i].second)) { - return Status::SemanticError("Props must be constant: `%s'", + foldStatus = ExpressionUtils::foldConstantExpr(items[i].second); + NG_RETURN_IF_ERROR(foldStatus); + foldExpr = foldStatus.value(); + if (!ExpressionUtils::isEvaluableExpr(foldExpr)) { + return Status::SemanticError("Props must be evaluable: `%s'", items[i].second->toString().c_str()); } + map->setItem(0, std::make_pair(items[i].first, foldExpr)); auto *left = root; auto *right = RelationalExpression::makeEQ( - pool, EdgePropertyExpression::make(pool, "*", items[i].first), items[i].second->clone()); + pool, EdgePropertyExpression::make(pool, "*", items[i].first), foldExpr); root = LogicalExpression::makeAnd(pool, left, right); } return root; } -StatusOr MatchValidator::makeNodeSubFilter(const MapExpression *map, +StatusOr MatchValidator::makeNodeSubFilter(MapExpression *map, const std::string &label) const { auto *pool = qctx_->objPool(); // Node has tag without property @@ -562,20 +568,28 @@ StatusOr MatchValidator::makeNodeSubFilter(const MapExpression *ma auto &items = map->items(); DCHECK(!items.empty()); - if (!ExpressionUtils::isEvaluableExpr(items[0].second)) { - return Status::SemanticError("Props must be constant: `%s'", + auto foldStatus = ExpressionUtils::foldConstantExpr(items[0].second); + NG_RETURN_IF_ERROR(foldStatus); + auto foldExpr = foldStatus.value(); + if (!ExpressionUtils::isEvaluableExpr(foldExpr)) { + return Status::SemanticError("Props must be evaluable: `%s'", items[0].second->toString().c_str()); } + 
map->setItem(0, std::make_pair(items[0].first, foldExpr)); Expression *root = RelationalExpression::makeEQ( - pool, TagPropertyExpression::make(pool, label, items[0].first), items[0].second->clone()); + pool, TagPropertyExpression::make(pool, label, items[0].first), foldExpr); for (auto i = 1u; i < items.size(); i++) { - if (!ExpressionUtils::isEvaluableExpr(items[i].second)) { - return Status::SemanticError("Props must be constant: `%s'", + foldStatus = ExpressionUtils::foldConstantExpr(items[i].second); + NG_RETURN_IF_ERROR(foldStatus); + foldExpr = foldStatus.value(); + if (!ExpressionUtils::isEvaluableExpr(foldExpr)) { + return Status::SemanticError("Props must be evaluable: `%s'", items[i].second->toString().c_str()); } + map->setItem(i, std::make_pair(items[i].first, foldExpr)); auto *left = root; auto *right = RelationalExpression::makeEQ( - pool, TagPropertyExpression::make(pool, label, items[i].first), items[i].second->clone()); + pool, TagPropertyExpression::make(pool, label, items[i].first), foldExpr); root = LogicalExpression::makeAnd(pool, left, right); } return root; diff --git a/src/graph/validator/MatchValidator.h b/src/graph/validator/MatchValidator.h index 14259adc8d5..e62743f4e6e 100644 --- a/src/graph/validator/MatchValidator.h +++ b/src/graph/validator/MatchValidator.h @@ -25,7 +25,7 @@ class MatchValidator final : public Validator { AstContext *getAstContext() override; - Status validatePath(const MatchPath *path, MatchClauseContext &matchClauseCtx) const; + Status validatePath(const MatchPath *path, MatchClauseContext &matchClauseCtx); Status validateFilter(const Expression *filter, WhereClauseContext &whereClauseCtx) const; @@ -68,13 +68,13 @@ class MatchValidator final : public Validator { Status buildNodeInfo(const MatchPath *path, std::vector &edgeInfos, - std::unordered_map &aliases) const; + std::unordered_map &aliases); Status buildEdgeInfo(const MatchPath *path, std::vector &nodeInfos, - std::unordered_map &aliases) const; + 
std::unordered_map &aliases); - Status buildPathExpr(const MatchPath *path, MatchClauseContext &matchClauseCtx) const; + Status buildPathExpr(const MatchPath *path, MatchClauseContext &matchClauseCtx); Status combineAliases(std::unordered_map &curAliases, const std::unordered_map &lastAliases) const; @@ -89,10 +89,9 @@ class MatchValidator final : public Validator { Status buildOutputs(const YieldColumns *yields); - StatusOr makeEdgeSubFilter(const MapExpression *map) const; + StatusOr makeEdgeSubFilter(MapExpression *map) const; - StatusOr makeNodeSubFilter(const MapExpression *map, - const std::string &label) const; + StatusOr makeNodeSubFilter(MapExpression *map, const std::string &label) const; private: std::unique_ptr matchCtx_; diff --git a/src/graph/visitor/FoldConstantExprVisitor.cpp b/src/graph/visitor/FoldConstantExprVisitor.cpp index dd970696774..5b5015e2279 100644 --- a/src/graph/visitor/FoldConstantExprVisitor.cpp +++ b/src/graph/visitor/FoldConstantExprVisitor.cpp @@ -346,17 +346,25 @@ Expression *FoldConstantExprVisitor::fold(Expression *expr) { auto value = expr->eval(ctx(nullptr)); if (value.type() == Value::Type::NULLVALUE) { switch (value.getNull()) { - case NullType::DIV_BY_ZERO: + case NullType::DIV_BY_ZERO: { canBeFolded_ = false; - status_ = Status::Error("/ by zero"); + status_ = Status::SemanticError("Divide by 0"); break; - case NullType::ERR_OVERFLOW: + } + case NullType::ERR_OVERFLOW: { + canBeFolded_ = false; + status_ = Status::SemanticError("result of %s cannot be represented as an integer", + expr->toString().c_str()); + break; + } + case NullType::BAD_TYPE: { canBeFolded_ = false; - status_ = Status::Error("result of %s cannot be represented as an integer", - expr->toString().c_str()); + status_ = Status::SemanticError("Type error `%s'", expr->toString().c_str()); break; - default: + } + default: { break; + } } } else { status_ = Status::OK(); diff --git a/src/graph/visitor/test/FilterTransformTest.cpp 
b/src/graph/visitor/test/FilterTransformTest.cpp index 7e949b64b4c..41eab24c428 100644 --- a/src/graph/visitor/test/FilterTransformTest.cpp +++ b/src/graph/visitor/test/FilterTransformTest.cpp @@ -29,7 +29,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { auto expr = ltExpr(minusExpr(laExpr("v", "age"), constantExpr(1)), constantExpr(9223372036854775807)); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (9223372036854775807+1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); @@ -39,7 +39,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { { auto expr = ltExpr(addExpr(laExpr("v", "age"), constantExpr(1)), constantExpr(INT64_MIN)); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (-9223372036854775808-1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); @@ -50,7 +50,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { auto expr = ltExpr(minusExpr(laExpr("v", "age"), constantExpr(1)), addExpr(constantExpr(9223372036854775807), constantExpr(1))); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (9223372036854775807+1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); @@ -61,7 +61,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { auto expr = ltExpr(addExpr(laExpr("v", "age"), constantExpr(1)), minusExpr(constantExpr(INT64_MIN), constantExpr(1))); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (-9223372036854775808-1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); @@ -72,7 +72,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { auto expr = notExpr(notExpr(notExpr(ltExpr(minusExpr(laExpr("v", "age"), 
constantExpr(1)), constantExpr(9223372036854775807))))); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (9223372036854775807+1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); @@ -83,7 +83,7 @@ TEST_F(FilterTransformTest, TestCalculationOverflow) { auto expr = notExpr(notExpr( notExpr(ltExpr(addExpr(laExpr("v", "age"), constantExpr(1)), constantExpr(INT64_MIN))))); auto res = ExpressionUtils::filterTransform(expr); - auto expected = Status::Error( + auto expected = Status::SemanticError( "result of (-9223372036854775808-1) cannot be represented as an " "integer"); ASSERT(!res.status().ok()); diff --git a/src/parser/MatchSentence.h b/src/parser/MatchSentence.h index 4390650b164..244a43efb11 100644 --- a/src/parser/MatchSentence.h +++ b/src/parser/MatchSentence.h @@ -165,6 +165,8 @@ class MatchNode final { const MapExpression* props() const { return props_; } + MapExpression* props() { return props_; } + std::string toString() const; private: diff --git a/src/storage/exec/IndexScanNode.cpp b/src/storage/exec/IndexScanNode.cpp index 41e6cf17b5a..426f02d752e 100644 --- a/src/storage/exec/IndexScanNode.cpp +++ b/src/storage/exec/IndexScanNode.cpp @@ -61,20 +61,30 @@ std::string Path::encodeValue(const Value& value, std::string& key) { std::string val; bool isNull = false; - if (colDef.get_type() == ::nebula::cpp2::PropertyType::GEOGRAPHY) { - CHECK_EQ(value.type(), Value::Type::STRING); - val = value.getStr(); - } else if (value.type() == Value::Type::STRING) { - val = IndexKeyUtils::encodeValue(value, *colDef.get_type_length()); - if (val.back() != '\0') { - strategySet_.insert(QualifiedStrategy::constant()); + switch (colDef.get_type()) { + case ::nebula::cpp2::PropertyType::STRING: + case ::nebula::cpp2::PropertyType::FIXED_STRING: { + if (value.type() == Value::Type::NULLVALUE) { + val = IndexKeyUtils::encodeNullValue(Value::Type::STRING, 
colDef.get_type_length()); + isNull = true; + } else { + val = IndexKeyUtils::encodeValue(value, *colDef.get_type_length()); + if (val.back() != '\0') { + strategySet_.insert(QualifiedStrategy::constant()); + } + } + break; + } + default: { + if (value.type() == Value::Type::NULLVALUE) { + auto vType = IndexKeyUtils::toValueType(colDef.get_type()); + val = IndexKeyUtils::encodeNullValue(vType, colDef.get_type_length()); + isNull = true; + } else { + val = IndexKeyUtils::encodeValue(value); + } + break; } - } else if (value.type() == Value::Type::NULLVALUE) { - auto vtype = IndexKeyUtils::toValueType(colDef.get_type()); - val = IndexKeyUtils::encodeNullValue(vtype, colDef.get_type_length()); - isNull = true; - } else { - val = IndexKeyUtils::encodeValue(value); } // If the current colDef can be null, then it is necessary to additionally determine whether the // corresponding value under a nullable is null when parsing the key (the encoding of the maximum diff --git a/tests/tck/features/expression/EndsWith.feature b/tests/tck/features/expression/EndsWith.feature index 8da777c0715..51212924c6c 100644 --- a/tests/tck/features/expression/EndsWith.feature +++ b/tests/tck/features/expression/EndsWith.feature @@ -60,9 +60,7 @@ Feature: Ends With Expression """ YIELD 123 ENDS WITH 3 """ - Then the result should be, in any order: - | (123 ENDS WITH 3) | - | BAD_TYPE | + Then a SemanticError should be raised at runtime: Type error `(123 ENDS WITH 3)' Scenario: yield not ends with When executing query: @@ -118,9 +116,7 @@ Feature: Ends With Expression """ YIELD 123 NOT ENDS WITH 3 """ - Then the result should be, in any order: - | (123 NOT ENDS WITH 3) | - | BAD_TYPE | + Then a SemanticError should be raised at runtime: Type error `(123 NOT ENDS WITH 3)' Scenario: ends with go When executing query: diff --git a/tests/tck/features/expression/Null.feature b/tests/tck/features/expression/Null.feature index 2044d72b542..e4616dbedc4 100644 --- 
a/tests/tck/features/expression/Null.feature +++ b/tests/tck/features/expression/Null.feature @@ -24,11 +24,11 @@ Feature: NULL related operations | NULL | NULL | NULL | NULL | NULL | When executing query: """ - RETURN cbrt(NULL) AS value1, hypot(NULL, NULL) AS value2, pow(NULL, NULL) AS value3, exp(NULL) AS value4, exp2(NULL) AS value5 + RETURN cbrt(NULL) AS value1, exp(NULL) AS value4, exp2(NULL) AS value5 """ Then the result should be, in any order: - | value1 | value2 | value3 | value4 | value5 | - | NULL | BAD_TYPE | BAD_TYPE | NULL | NULL | + | value1 | value4 | value5 | + | NULL | NULL | NULL | When executing query: """ RETURN log(NULL) AS value1, log2(NULL) AS value2, log10(NULL) AS value3, sin(NULL) AS value4, asin(NULL) AS value5 @@ -38,18 +38,18 @@ Feature: NULL related operations | NULL | NULL | NULL | NULL | NULL | When executing query: """ - RETURN cos(NULL) AS value1, acos(NULL) AS value2, tan(NULL) AS value3, atan(NULL) AS value4, rand32(NULL) AS value5 + RETURN cos(NULL) AS value1, acos(NULL) AS value2, tan(NULL) AS value3, atan(NULL) AS value4 """ Then the result should be, in any order: - | value1 | value2 | value3 | value4 | value5 | - | NULL | NULL | NULL | NULL | BAD_TYPE | + | value1 | value2 | value3 | value4 | + | NULL | NULL | NULL | NULL | When executing query: """ - RETURN collect(NULL) AS value1, avg(NULL) AS value2, count(NULL) AS value3, max(NULL) AS value4, rand64(NULL,NULL) AS value5 + RETURN collect(NULL) AS value1, avg(NULL) AS value2, count(NULL) AS value3, max(NULL) AS value4 """ Then the result should be, in any order: - | value1 | value2 | value3 | value4 | value5 | - | [] | NULL | 0 | NULL | BAD_TYPE | + | value1 | value2 | value3 | value4 | + | [] | NULL | 0 | NULL | When executing query: """ RETURN min(NULL) AS value1, std(NULL) AS value2, sum(NULL) AS value3, bit_and(NULL) AS value4, bit_or(NULL,NULL) AS value5 @@ -59,8 +59,8 @@ Feature: NULL related operations | NULL | NULL | 0 | NULL | NULL | When executing query: """ - 
RETURN bit_xor(NULL) AS value1, size(NULL) AS value2, range(NULL,NULL) AS value3, sign(NULL) AS value4, radians(NULL) AS value5 + RETURN bit_xor(NULL) AS value1, size(NULL) AS value2, sign(NULL) AS value4, radians(NULL) AS value5 """ Then the result should be, in any order: - | value1 | value2 | value3 | value4 | value5 | - | NULL | NULL | BAD_TYPE | NULL | NULL | + | value1 | value2 | value4 | value5 | + | NULL | NULL | NULL | NULL | diff --git a/tests/tck/features/expression/StartsWith.feature b/tests/tck/features/expression/StartsWith.feature index 4410ca986c0..782fe02d414 100644 --- a/tests/tck/features/expression/StartsWith.feature +++ b/tests/tck/features/expression/StartsWith.feature @@ -46,9 +46,7 @@ Feature: Starts With Expression """ YIELD 123 STARTS WITH 1 """ - Then the result should be, in any order: - | (123 STARTS WITH 1) | - | BAD_TYPE | + Then a SemanticError should be raised at runtime: Type error `(123 STARTS WITH 1)' Scenario: yield not starts with When executing query: @@ -90,9 +88,7 @@ Feature: Starts With Expression """ YIELD 123 NOT STARTS WITH 1 """ - Then the result should be, in any order: - | (123 NOT STARTS WITH 1) | - | BAD_TYPE | + Then a SemanticError should be raised at runtime: Type error `(123 NOT STARTS WITH 1)' Scenario: starts with go When executing query: diff --git a/tests/tck/features/expression/function/Mathematical.feature b/tests/tck/features/expression/function/Mathematical.feature index 656658d1168..cecdd0e40e5 100644 --- a/tests/tck/features/expression/function/Mathematical.feature +++ b/tests/tck/features/expression/function/Mathematical.feature @@ -22,6 +22,4 @@ Feature: Mathematical function Expression """ return [bit_and(5,true),bit_or(2,1.3),bit_xor("5",1)] as error_test """ - Then the result should be, in any order: - | error_test | - | [BAD_TYPE, BAD_TYPE, BAD_TYPE] | + Then a SemanticError should be raised at runtime: Type error `bit_and(5,true)' diff --git 
a/tests/tck/features/expression/function/TypeConversion.feature b/tests/tck/features/expression/function/TypeConversion.feature index a2481adb3d8..c3e83ccb30b 100644 --- a/tests/tck/features/expression/function/TypeConversion.feature +++ b/tests/tck/features/expression/function/TypeConversion.feature @@ -9,72 +9,81 @@ Feature: TypeConversion Expression Scenario: toBoolean When executing query: """ - YIELD [toBoolean(true), toBoolean(false), toBoolean(1), toBoolean(3.14), + YIELD [toBoolean(true), toBoolean(false), toBoolean("trUe"), toBoolean("3.14"), toBoolean(null)] AS yield_toBoolean """ Then the result should be, in any order: - | yield_toBoolean | - | [true, false, BAD_TYPE, BAD_TYPE, true, NULL, NULL] | + | yield_toBoolean | + | [true, false, true, NULL, NULL] | When executing query: """ - UNWIND [true, false, 1, 3.14, "trUe", "3.14", null] AS b + UNWIND [true, false, "trUe", "3.14", null] AS b RETURN toBoolean(b) AS unwind_toBoolean """ Then the result should be, in any order: | unwind_toBoolean | | true | | false | - | BAD_TYPE | - | BAD_TYPE | | true | | NULL | | NULL | + When executing query: + """ + YIELD [toBoolean(1), toBoolean(3.14)] AS yield_toBoolean + """ + Then a SemanticError should be raised at runtime: Type error `toBoolean(1)' Scenario: toFloat When executing query: """ - YIELD [toFloat(true), toFloat(false), toFloat(1), toFloat(3.14), + YIELD [toFloat(1), toFloat(3.14), toFloat("trUe"), toFloat("3.14"), toFloat(null)] AS yield_toFloat """ Then the result should be, in any order: - | yield_toFloat | - | [BAD_TYPE, BAD_TYPE, 1.0, 3.14, NULL, 3.14, NULL] | + | yield_toFloat | + | [1.0, 3.14, NULL, 3.14, NULL] | When executing query: """ - UNWIND [true, false, 1, 3.14, "trUe", "3.14", null] AS b + UNWIND [1, 3.14, "trUe", "3.14", null] AS b RETURN toFloat(b) AS unwind_toFloat """ Then the result should be, in any order: | unwind_toFloat | - | BAD_TYPE | - | BAD_TYPE | | 1.0 | | 3.14 | | NULL | | 3.14 | | NULL | + When executing query: + """ + 
YIELD [toFloat(true), toFloat(false)] AS yield_toFloat + """ + Then a SemanticError should be raised at runtime: Type error `toFloat(true)' Scenario: toInteger When executing query: """ - YIELD [toInteger(true), toInteger(false), toInteger(1), toInteger(3.14), + YIELD [toInteger(1), toInteger(3.14), toInteger("trUe"), toInteger("3.14"), toInteger(null), toInteger("1e3"), toInteger("1E3"), toInteger("1.5E4")] AS yield_toInteger """ Then the result should be, in any order: - | yield_toInteger | - | [BAD_TYPE, BAD_TYPE, 1, 3, NULL, 3, NULL, 1000, 1000, 15000] | + | yield_toInteger | + | [1, 3, NULL, 3, NULL, 1000, 1000, 15000] | When executing query: """ - UNWIND [true, false, 1, 3.14, "trUe", "3.14", null] AS b + UNWIND [1, 3.14, "trUe", "3.14", null] AS b RETURN toInteger(b) AS unwind_toInteger """ Then the result should be, in any order: | unwind_toInteger | - | BAD_TYPE | - | BAD_TYPE | | 1 | | 3 | | NULL | | 3 | | NULL | + When executing query: + """ + YIELD [toInteger(true), toInteger(false)] AS yield_toInteger + """ + Then a SemanticError should be raised at runtime: Type error `toInteger(true)' diff --git a/tests/tck/features/go/GO.feature b/tests/tck/features/go/GO.feature index 7878a3f9017..e48ead1ab2b 100644 --- a/tests/tck/features/go/GO.feature +++ b/tests/tck/features/go/GO.feature @@ -18,12 +18,12 @@ Feature: Go Sentence """ GO FROM "Tim Duncan", "Tony Parker" OVER like WHERE $$.player.age > 9223372036854775807+1 YIELD like._dst """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ GO FROM "Tim Duncan", "Tony Parker" OVER like WHERE $$.player.age > -9223372036854775808-1 YIELD like._dst """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer + Then a SemanticError 
should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer When executing query: """ GO FROM "Tim Duncan" OVER like YIELD $^.player.name as name, $^.player.age as age diff --git a/tests/tck/features/lookup/ByIndex.feature b/tests/tck/features/lookup/ByIndex.feature index d6df792509c..f12621b80fd 100644 --- a/tests/tck/features/lookup/ByIndex.feature +++ b/tests/tck/features/lookup/ByIndex.feature @@ -94,12 +94,12 @@ Feature: Lookup by index itself """ LOOKUP ON player WHERE player.age > 9223372036854775807+1 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ LOOKUP ON player WHERE player.age > -9223372036854775808-1 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer Scenario: [2] edge index Given a graph with space named "nba" diff --git a/tests/tck/features/lookup/ByIndex.intVid.feature b/tests/tck/features/lookup/ByIndex.intVid.feature index 7f6c3466b63..5dd9e4aaa5c 100644 --- a/tests/tck/features/lookup/ByIndex.intVid.feature +++ b/tests/tck/features/lookup/ByIndex.intVid.feature @@ -94,12 +94,12 @@ Feature: Lookup by index itself in integer vid """ LOOKUP ON player WHERE player.age > 9223372036854775807+1 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ LOOKUP ON player WHERE player.age > -9223372036854775808-1 """ - Then a ExecutionError should be raised at runtime: result of 
(-9223372036854775808-1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer Scenario: [2] edge index Given a graph with space named "nba_int_vid" diff --git a/tests/tck/features/match/Base.IntVid.feature b/tests/tck/features/match/Base.IntVid.feature index 0b95df4156d..d06379f3775 100644 --- a/tests/tck/features/match/Base.IntVid.feature +++ b/tests/tck/features/match/Base.IntVid.feature @@ -59,12 +59,12 @@ Feature: Basic match """ MATCH (v:player) where v.age > 9223372036854775807+1 return v """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ MATCH (v:player) where v.age > -9223372036854775808-1 return v """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer Scenario: Une step When executing query: @@ -415,6 +415,66 @@ Feature: Basic match | [:like "Tony Parker"->"Manu Ginobili" @0 {likeness: 95}] | | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | + Scenario: filter evaluable + When executing query: + """ + match (v:player{age: -1}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + When executing query: + """ + match (v:player{age: +20}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("Luka Doncic" :player{age: 20, name: "Luka Doncic"}) | + When executing query: + """ + match (v:player{age: 1+19}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("Luka Doncic" :player{age: 20, name: "Luka Doncic"}) | + When 
executing query: + """ + match (v:player)-[e:like{likeness:-1}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | + | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | + When executing query: + """ + match (v:player)-[e:like{likeness:40+50+5}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}] | + | [:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}] | + | [:like "Paul George"->"Russell Westbrook" @0 {likeness: 95}] | + | [:like "Tony Parker"->"Manu Ginobili" @0 {likeness: 95}] | + | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | + When executing query: + """ + match (v:player)-[e:like{likeness:4*20+5}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Jason Kidd"->"Dirk Nowitzki"@0{likeness:85}] | + | [:like "Steve Nash"->"Jason Kidd"@0{likeness:85}] | + When executing query: + """ + match (v:player)-[e:like{likeness:"99"}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + When executing query: + """ + match (v:player{age:"24"-1}) return v + """ + Then a SemanticError should be raised at runtime: Type error `("24"-1)' + Scenario: No return When executing query: """ diff --git a/tests/tck/features/match/Base.feature b/tests/tck/features/match/Base.feature index 26227d58e9b..ddd9f76d970 100644 --- a/tests/tck/features/match/Base.feature +++ b/tests/tck/features/match/Base.feature @@ -97,12 +97,12 @@ Feature: Basic match """ MATCH (v:player) where v.age > 9223372036854775807+1 return v """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing 
query: """ MATCH (v:player) where v.age > -9223372036854775808-1 return v """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer Scenario: One step When executing query: @@ -518,6 +518,67 @@ Feature: Basic match | [:like "Tony Parker"->"Manu Ginobili" @0 {likeness: 95}] | | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | + Scenario: filter evaluable + When executing query: + """ + match (v:player{age: -1}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("Null1" :player{age: -1, name: NULL}) | + When executing query: + """ + match (v:player{age: +20}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("Luka Doncic" :player{age: 20, name: "Luka Doncic"}) | + When executing query: + """ + match (v:player{age: 1+19}) return v + """ + Then the result should be, in any order, with relax comparison: + | v | + | ("Luka Doncic" :player{age: 20, name: "Luka Doncic"}) | + When executing query: + """ + match (v:player)-[e:like{likeness:-1}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | + | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | + When executing query: + """ + match (v:player)-[e:like{likeness:40+50+5}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}] | + | [:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}] | + | [:like "Paul George"->"Russell Westbrook" @0 {likeness: 95}] | + | [:like "Tony Parker"->"Manu Ginobili" @0 {likeness: 95}] | + | [:like "Tony Parker"->"Tim Duncan" @0 {likeness: 95}] | + When executing query: + """ + match 
(v:player)-[e:like{likeness:4*20+5}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + | [:like "Jason Kidd"->"Dirk Nowitzki"@0{likeness:85}] | + | [:like "Steve Nash"->"Jason Kidd"@0{likeness:85}] | + When executing query: + """ + match (v:player)-[e:like{likeness:"99"}]->() return e + """ + Then the result should be, in any order, with relax comparison: + | e | + When executing query: + """ + match (v:player{age:"24"-1}) return v + """ + Then a SemanticError should be raised at runtime: Type error `("24"-1)' + Scenario: No return When executing query: """ diff --git a/tests/tck/features/schema/Schema.feature b/tests/tck/features/schema/Schema.feature index 252c3589bf6..b203ea87b62 100644 --- a/tests/tck/features/schema/Schema.feature +++ b/tests/tck/features/schema/Schema.feature @@ -655,7 +655,7 @@ Feature: Insert string vid of vertex and edge """ CREATE TAG bad_null_default_value(name string DEFAULT "N/A", age int DEFAULT 1%0) """ - Then a ExecutionError should be raised at runtime: / by zero + Then a SemanticError should be raised at runtime: Divide by 0 # test alter tag with wrong type default value of string when add When executing query: """ diff --git a/tests/tck/features/yield/yield.IntVid.feature b/tests/tck/features/yield/yield.IntVid.feature index e00a0c10284..7e0772a9d96 100644 --- a/tests/tck/features/yield/yield.IntVid.feature +++ b/tests/tck/features/yield/yield.IntVid.feature @@ -320,42 +320,42 @@ Feature: Yield Sentence """ YIELD 9223372036854775807+1 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807-2) cannot be represented as an integer + Then a SemanticError 
should be raised at runtime: result of (-9223372036854775807-2) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807+-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807+-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775807+-2) cannot be represented as an integer When executing query: """ YIELD 9223372036854775807*2 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807*2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807*2) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807*-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807*-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775807*-2) cannot be represented as an integer When executing query: """ YIELD 9223372036854775807*-2 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807*-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807*-2) cannot be represented as an integer When executing query: """ YIELD 1/0 """ - Then a ExecutionError should be raised at runtime: / by zero + Then a SemanticError should be raised at runtime: Divide by 0 When executing query: """ YIELD 2%0 """ - Then a ExecutionError should be raised at runtime: / by zero + Then a SemanticError should be raised at runtime: Divide by 0 When executing query: """ YIELD -9223372036854775808 @@ -367,7 +367,7 @@ Feature: Yield Sentence """ YIELD --9223372036854775808 """ - Then a ExecutionError should be raised at runtime: result of -(-9223372036854775808) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of 
-(-9223372036854775808) cannot be represented as an integer When executing query: """ YIELD -9223372036854775809 diff --git a/tests/tck/features/yield/yield.feature b/tests/tck/features/yield/yield.feature index 01ca33e0b6a..254609ba9a7 100644 --- a/tests/tck/features/yield/yield.feature +++ b/tests/tck/features/yield/yield.feature @@ -330,42 +330,42 @@ Feature: Yield Sentence """ YIELD 9223372036854775807+1 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775807-2) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807+-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807+-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775807+-2) cannot be represented as an integer When executing query: """ YIELD 9223372036854775807*2 """ - Then a ExecutionError should be raised at runtime: result of (9223372036854775807*2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807*2) cannot be represented as an integer When executing query: """ YIELD -9223372036854775807*-2 """ - Then a ExecutionError should be raised at runtime: result of (-9223372036854775807*-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (-9223372036854775807*-2) cannot be represented as an integer When executing query: """ YIELD 9223372036854775807*-2 """ - Then a ExecutionError should be raised at runtime: 
result of (9223372036854775807*-2) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of (9223372036854775807*-2) cannot be represented as an integer When executing query: """ YIELD 1/0 """ - Then a ExecutionError should be raised at runtime: / by zero + Then a SemanticError should be raised at runtime: Divide by 0 When executing query: """ YIELD 2%0 """ - Then a ExecutionError should be raised at runtime: / by zero + Then a SemanticError should be raised at runtime: Divide by 0 When executing query: """ YIELD -9223372036854775808 @@ -377,7 +377,7 @@ Feature: Yield Sentence """ YIELD --9223372036854775808 """ - Then a ExecutionError should be raised at runtime: result of -(-9223372036854775808) cannot be represented as an integer + Then a SemanticError should be raised at runtime: result of -(-9223372036854775808) cannot be represented as an integer When executing query: """ YIELD -9223372036854775809 From 80e827fca303fb88bf0dce4b00c93300607b69bc Mon Sep 17 00:00:00 2001 From: cpw <13495049+CPWstatic@users.noreply.github.com> Date: Mon, 22 Nov 2021 23:06:27 +0800 Subject: [PATCH 33/53] Multi paths. (#3318) * Add path list for match. * Support paths. * Fix test and optimize the linked objects. 
Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- src/common/expression/test/CMakeLists.txt | 22 +--------- src/common/geo/io/wkb/test/CMakeLists.txt | 49 ++--------------------- src/common/geo/io/wkt/test/CMakeLists.txt | 49 ++--------------------- src/daemons/CMakeLists.txt | 1 + src/graph/context/test/CMakeLists.txt | 3 +- src/graph/executor/test/CMakeLists.txt | 1 + src/graph/optimizer/test/CMakeLists.txt | 1 + src/graph/planner/CMakeLists.txt | 18 +++++---- src/graph/planner/Planner.h | 7 +--- src/graph/planner/plan/ExecutionPlan.h | 7 +++- src/graph/planner/test/CMakeLists.txt | 1 + src/graph/util/PlannerUtil.cpp | 1 - src/graph/util/ValidateUtil.cpp | 1 - src/graph/util/test/CMakeLists.txt | 3 +- src/graph/validator/MatchValidator.cpp | 5 ++- src/graph/validator/test/CMakeLists.txt | 1 + src/graph/visitor/test/CMakeLists.txt | 1 + src/parser/MatchSentence.cpp | 19 ++++++++- src/parser/MatchSentence.h | 27 ++++++++++--- src/parser/parser.yy | 15 ++++++- src/parser/test/CMakeLists.txt | 9 +++-- src/parser/test/ParserTest.cpp | 35 ++++++++++++++++ 22 files changed, 131 insertions(+), 145 deletions(-) diff --git a/src/common/expression/test/CMakeLists.txt b/src/common/expression/test/CMakeLists.txt index 6781b5da6d3..1cda6272cd3 100644 --- a/src/common/expression/test/CMakeLists.txt +++ b/src/common/expression/test/CMakeLists.txt @@ -34,12 +34,10 @@ set(expression_test_common_libs $ $ $ - $ $ $ $ - $ - $ + $ $ $ $ @@ -78,16 +76,7 @@ nebula_add_test( TypeCastingExpressionTest.cpp VersionedVariableExpressionTest.cpp OBJECTS - $ - $ - $ $ - $ - $ - $ - $ - $ - $ ${expression_test_common_libs} LIBRARIES gtest @@ -101,16 +90,7 @@ nebula_add_executable( SOURCES ExpressionBenchmark.cpp OBJECTS - $ - $ - $ $ - $ - $ - $ - $ - $ - $ ${expression_test_common_libs} LIBRARIES follybenchmark diff --git a/src/common/geo/io/wkb/test/CMakeLists.txt b/src/common/geo/io/wkb/test/CMakeLists.txt index cc6ebedd674..52e5d860d95 100644 --- 
a/src/common/geo/io/wkb/test/CMakeLists.txt +++ b/src/common/geo/io/wkb/test/CMakeLists.txt @@ -2,53 +2,12 @@ # # This source code is licensed under Apache 2.0 License. -set(WKB_TEST_LIBS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ -) - nebula_add_test( NAME wkb_test SOURCES WKBTest.cpp - OBJECTS ${WKB_TEST_LIBS} + OBJECTS + $ + $ + $ LIBRARIES gtest gtest_main ${THRIFT_LIBRARIES} ${PROXYGEN_LIBRARIES} ) diff --git a/src/common/geo/io/wkt/test/CMakeLists.txt b/src/common/geo/io/wkt/test/CMakeLists.txt index c751a24d883..aadfdc18baa 100644 --- a/src/common/geo/io/wkt/test/CMakeLists.txt +++ b/src/common/geo/io/wkt/test/CMakeLists.txt @@ -2,53 +2,12 @@ # # This source code is licensed under Apache 2.0 License. -set(WKT_TEST_LIBS - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ - $ -) - nebula_add_test( NAME wkt_test SOURCES WKTTest.cpp - OBJECTS ${WKT_TEST_LIBS} + OBJECTS + $ + $ + $ LIBRARIES gtest gtest_main ${THRIFT_LIBRARIES} ${PROXYGEN_LIBRARIES} ) diff --git a/src/daemons/CMakeLists.txt b/src/daemons/CMakeLists.txt index 1a92710dc34..36161fc8b01 100644 --- a/src/daemons/CMakeLists.txt +++ b/src/daemons/CMakeLists.txt @@ -118,6 +118,7 @@ nebula_add_executable( $ $ $ + $ $ $ $ diff --git a/src/graph/context/test/CMakeLists.txt b/src/graph/context/test/CMakeLists.txt index abf7d281a82..54cd9fe3237 100644 --- a/src/graph/context/test/CMakeLists.txt +++ b/src/graph/context/test/CMakeLists.txt @@ -34,11 +34,10 @@ SET(CONTEXT_TEST_LIBS $ $ $ - $ $ $ $ - $ + $ $ $ $ diff --git a/src/graph/executor/test/CMakeLists.txt b/src/graph/executor/test/CMakeLists.txt index c3a60a48d38..d88f3b299e7 100644 --- a/src/graph/executor/test/CMakeLists.txt +++ b/src/graph/executor/test/CMakeLists.txt @@ -41,6 +41,7 @@ SET(EXEC_QUERY_TEST_OBJS $ $ $ + $ $ $ $ 
diff --git a/src/graph/optimizer/test/CMakeLists.txt b/src/graph/optimizer/test/CMakeLists.txt index eaec4363bf6..f014c9abfa4 100644 --- a/src/graph/optimizer/test/CMakeLists.txt +++ b/src/graph/optimizer/test/CMakeLists.txt @@ -39,6 +39,7 @@ set(OPTIMIZER_TEST_LIB $ $ $ + $ $ $ $ diff --git a/src/graph/planner/CMakeLists.txt b/src/graph/planner/CMakeLists.txt index f5e0eab5d70..c3d487105ed 100644 --- a/src/graph/planner/CMakeLists.txt +++ b/src/graph/planner/CMakeLists.txt @@ -26,6 +26,17 @@ nebula_add_library( match/PropIndexSeek.cpp match/VertexIdSeek.cpp match/LabelIndexSeek.cpp + ngql/PathPlanner.cpp + ngql/GoPlanner.cpp + ngql/SubgraphPlanner.cpp + ngql/LookupPlanner.cpp + ngql/FetchVerticesPlanner.cpp + ngql/FetchEdgesPlanner.cpp + ngql/MaintainPlanner.cpp +) + +nebula_add_library( + plan_obj OBJECT plan/PlanNode.cpp plan/ExecutionPlan.cpp plan/Algo.cpp @@ -34,11 +45,4 @@ nebula_add_library( plan/Query.cpp plan/Mutate.cpp plan/Maintain.cpp - ngql/PathPlanner.cpp - ngql/GoPlanner.cpp - ngql/SubgraphPlanner.cpp - ngql/LookupPlanner.cpp - ngql/FetchVerticesPlanner.cpp - ngql/FetchEdgesPlanner.cpp - ngql/MaintainPlanner.cpp ) diff --git a/src/graph/planner/Planner.h b/src/graph/planner/Planner.h index 96d9d421c69..ec207774491 100644 --- a/src/graph/planner/Planner.h +++ b/src/graph/planner/Planner.h @@ -10,6 +10,7 @@ #include "common/base/Base.h" #include "graph/context/ast/AstContext.h" +#include "graph/planner/plan/ExecutionPlan.h" #include "graph/planner/plan/PlanNode.h" namespace nebula { @@ -23,12 +24,6 @@ extern const char* kVertexID; extern const char* kVertices; extern const char* kEdges; -struct SubPlan { - // root and tail of a subplan. 
- PlanNode* root{nullptr}; - PlanNode* tail{nullptr}; -}; - std::ostream& operator<<(std::ostream& os, const SubPlan& subplan); using MatchFunc = std::function; diff --git a/src/graph/planner/plan/ExecutionPlan.h b/src/graph/planner/plan/ExecutionPlan.h index b65c9e9034f..2692d6e67f6 100644 --- a/src/graph/planner/plan/ExecutionPlan.h +++ b/src/graph/planner/plan/ExecutionPlan.h @@ -15,9 +15,14 @@ struct PlanDescription; struct PlanNodeDescription; namespace graph { - class PlanNode; +struct SubPlan { + // root and tail of a subplan. + PlanNode* root{nullptr}; + PlanNode* tail{nullptr}; +}; + class ExecutionPlan final { public: explicit ExecutionPlan(PlanNode* root = nullptr); diff --git a/src/graph/planner/test/CMakeLists.txt b/src/graph/planner/test/CMakeLists.txt index f2268e60158..355c1cbae60 100644 --- a/src/graph/planner/test/CMakeLists.txt +++ b/src/graph/planner/test/CMakeLists.txt @@ -43,6 +43,7 @@ nebula_add_test( $ $ $ + $ $ $ $ diff --git a/src/graph/util/PlannerUtil.cpp b/src/graph/util/PlannerUtil.cpp index 32154371647..d161ede4557 100644 --- a/src/graph/util/PlannerUtil.cpp +++ b/src/graph/util/PlannerUtil.cpp @@ -9,7 +9,6 @@ #include "common/expression/ColumnExpression.h" #include "graph/context/QueryContext.h" #include "graph/context/ast/QueryAstContext.h" -#include "graph/planner/Planner.h" #include "graph/planner/plan/Query.h" namespace nebula { diff --git a/src/graph/util/ValidateUtil.cpp b/src/graph/util/ValidateUtil.cpp index 748c9db3925..b0f6eeaa18d 100644 --- a/src/graph/util/ValidateUtil.cpp +++ b/src/graph/util/ValidateUtil.cpp @@ -9,7 +9,6 @@ #include "common/expression/ColumnExpression.h" #include "graph/context/QueryContext.h" #include "graph/context/ast/QueryAstContext.h" -#include "graph/planner/Planner.h" #include "graph/planner/plan/Query.h" #include "graph/util/ExpressionUtils.h" diff --git a/src/graph/util/test/CMakeLists.txt b/src/graph/util/test/CMakeLists.txt index afdbc51cc46..abf1373d1e9 100644 --- 
a/src/graph/util/test/CMakeLists.txt +++ b/src/graph/util/test/CMakeLists.txt @@ -39,11 +39,10 @@ nebula_add_test( $ $ $ - $ + $ $ $ $ - $ $ LIBRARIES gtest diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index 5789a41e593..d2e6f009d72 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -45,7 +45,10 @@ Status MatchValidator::validateImpl() { auto matchClauseCtx = getContext(); matchClauseCtx->aliasesUsed = aliasesUsed; - NG_RETURN_IF_ERROR(validatePath(matchClause->path(), *matchClauseCtx)); + if (matchClause->path()->pathSize() > 1) { + return Status::SemanticError("Multi paths not supported."); + } + NG_RETURN_IF_ERROR(validatePath(matchClause->path()->path(0) /* TODO */, *matchClauseCtx)); if (matchClause->where() != nullptr) { auto whereClauseCtx = getContext(); whereClauseCtx->aliasesUsed = &matchClauseCtx->aliasesGenerated; diff --git a/src/graph/validator/test/CMakeLists.txt b/src/graph/validator/test/CMakeLists.txt index fa19c7b7009..f01755e0938 100644 --- a/src/graph/validator/test/CMakeLists.txt +++ b/src/graph/validator/test/CMakeLists.txt @@ -14,6 +14,7 @@ set(VALIDATOR_TEST_LIBS $ $ $ + $ $ $ $ diff --git a/src/graph/visitor/test/CMakeLists.txt b/src/graph/visitor/test/CMakeLists.txt index 2b0cffae4d9..9cc8cb43d5b 100644 --- a/src/graph/visitor/test/CMakeLists.txt +++ b/src/graph/visitor/test/CMakeLists.txt @@ -18,6 +18,7 @@ nebula_add_test( $ $ $ + $ $ $ $ diff --git a/src/parser/MatchSentence.cpp b/src/parser/MatchSentence.cpp index a6f06fe3c93..edfd0b8357a 100644 --- a/src/parser/MatchSentence.cpp +++ b/src/parser/MatchSentence.cpp @@ -20,7 +20,7 @@ std::string MatchClause::toString() const { } buf += "MATCH "; - buf += path_->toString(); + buf += pathList_->toString(); if (where_ != nullptr) { buf += " "; buf += where_->toString(); @@ -224,7 +224,7 @@ std::string MatchSentence::toString() const { std::string buf; buf.reserve(256); - for (auto &clause : 
clauses_) { + for (auto& clause : clauses_) { buf += clause->toString(); buf += " "; } @@ -234,4 +234,19 @@ std::string MatchSentence::toString() const { return buf; } +MatchPathList::MatchPathList(MatchPath* path) { pathList_.emplace_back(path); } + +void MatchPathList::add(MatchPath* path) { pathList_.emplace_back(path); } + +std::string MatchPathList::toString() const { + std::string buf; + buf.reserve(256); + std::vector pathList; + std::transform(pathList_.begin(), pathList_.end(), std::back_inserter(pathList), [](auto& path) { + return path->toString(); + }); + folly::join(",", pathList.begin(), pathList.end(), buf); + return buf; +} + } // namespace nebula diff --git a/src/parser/MatchSentence.h b/src/parser/MatchSentence.h index 244a43efb11..34125ca49df 100644 --- a/src/parser/MatchSentence.h +++ b/src/parser/MatchSentence.h @@ -206,6 +206,22 @@ class MatchPath final { std::vector> edges_; }; +class MatchPathList final { + public: + explicit MatchPathList(MatchPath* path); + + void add(MatchPath* path); + + size_t pathSize() const { return pathList_.size(); } + + const MatchPath* path(size_t i) const { return pathList_[i].get(); } + + std::string toString() const; + + private: + std::vector> pathList_; +}; + class MatchReturnItems final { public: explicit MatchReturnItems(bool includeExisting, YieldColumns* columns = nullptr) @@ -288,15 +304,16 @@ class ReadingClause { class MatchClause final : public ReadingClause { public: - MatchClause(MatchPath* path, WhereClause* where, bool optional) : ReadingClause(Kind::kMatch) { - path_.reset(path); + MatchClause(MatchPathList* pathList, WhereClause* where, bool optional) + : ReadingClause(Kind::kMatch) { + pathList_.reset(pathList); where_.reset(where); isOptional_ = optional; } - MatchPath* path() { return path_.get(); } + MatchPathList* pathList() { return pathList_.get(); } - const MatchPath* path() const { return path_.get(); } + const MatchPathList* path() const { return pathList_.get(); } WhereClause* 
where() { return where_.get(); } @@ -308,7 +325,7 @@ class MatchClause final : public ReadingClause { private: bool isOptional_{false}; - std::unique_ptr path_; + std::unique_ptr pathList_; std::unique_ptr where_; }; diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 7508eb588e8..a0fac7dae9a 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -128,6 +128,7 @@ static constexpr size_t kCommentLengthLimit = 256; ExpressionList *expression_list; MapItemList *map_item_list; MatchPath *match_path; + MatchPathList *match_path_list; MatchNode *match_node; MatchNodeLabel *match_node_label; MatchNodeLabelList *match_node_label_list; @@ -301,6 +302,7 @@ static constexpr size_t kCommentLengthLimit = 256; %type match_path_pattern %type match_path +%type match_path_list %type match_node %type match_node_label %type match_node_label_list @@ -1503,7 +1505,7 @@ with_clause ; match_clause - : KW_MATCH match_path where_clause { + : KW_MATCH match_path_list where_clause { if ($3 && graph::ExpressionUtils::findAny($3->filter(),{Expression::Kind::kAggregate})) { delete($2); delete($3); @@ -1512,7 +1514,7 @@ match_clause $$ = new MatchClause($2, $3, false/*optional*/); } } - | KW_OPTIONAL KW_MATCH match_path where_clause { + | KW_OPTIONAL KW_MATCH match_path_list where_clause { if ($4 && graph::ExpressionUtils::findAny($4->filter(),{Expression::Kind::kAggregate})) { delete($3); delete($4); @@ -1597,6 +1599,15 @@ match_path } ; +match_path_list + : match_path { + $$ = new MatchPathList($1); + } + | match_path_list COMMA match_path { + $$ = $1; + $$->add($3); + } + match_node : L_PAREN match_alias R_PAREN { $$ = new MatchNode(*$2, nullptr, nullptr); diff --git a/src/parser/test/CMakeLists.txt b/src/parser/test/CMakeLists.txt index b1ef0dc9069..6e71a9cf044 100644 --- a/src/parser/test/CMakeLists.txt +++ b/src/parser/test/CMakeLists.txt @@ -38,25 +38,26 @@ set(PARSER_TEST_LIBS $ $ $ - $ - $ $ $ $ $ + $ ) nebula_add_test( NAME parser_test SOURCES ParserTest.cpp - 
OBJECTS ${PARSER_TEST_LIBS} + OBJECTS + ${PARSER_TEST_LIBS} LIBRARIES gtest gtest_main ${THRIFT_LIBRARIES} ${PROXYGEN_LIBRARIES} ) nebula_add_test( NAME scanner_test SOURCES ScannerTest.cpp - OBJECTS ${PARSER_TEST_LIBS} + OBJECTS + ${PARSER_TEST_LIBS} LIBRARIES gtest gtest_main ${THRIFT_LIBRARIES} ${PROXYGEN_LIBRARIES} ) diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp index dedba7f9ad9..ba3fd9ca79a 100644 --- a/src/parser/test/ParserTest.cpp +++ b/src/parser/test/ParserTest.cpp @@ -2610,6 +2610,41 @@ TEST_F(ParserTest, Match) { auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } + { + std::string query = + "MATCH (a)-[b]-(c) " + "WITH a,b,c " + "RETURN a,b,c"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + } + { + std::string query = + "MATCH (a)-[b]-(c) " + "MATCH (c)-[d]-(e) " + "RETURN a,b,c,d,e"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + } + { + std::string query = + "MATCH (a)-[b]-(c) " + "WITH a,b,c " + "MATCH (c)-[d]-(e) " + "RETURN a,b,c,d,e"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + } + { + std::string query = "MATCH (a),(b),(c) RETURN a,b,c"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + } + { + std::string query = "MATCH (a)-[b]-(c), (c)-[d]-(e) RETURN a,b,c,d,e"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + } } TEST_F(ParserTest, MatchErrorCheck) { From 6cdca05ef68343b1a44203b1857e7febc11c77d9 Mon Sep 17 00:00:00 2001 From: jimingquan Date: Tue, 23 Nov 2021 11:39:09 +0800 Subject: [PATCH 34/53] format find path (#2957) * format find path * add test case * only support yield path * fix test error * address comment * fix error * add yield clause * fix ci test * remove reduncant header file * fix clang compiler error * fix spell error --- .linters/cpp/checkKeyword.py | 1 + src/graph/context/ast/QueryAstContext.h | 1 + 
src/graph/optimizer/OptGroup.h | 4 - src/graph/optimizer/Optimizer.h | 2 - src/graph/optimizer/OptimizerUtils.cpp | 12 - .../optimizer/rule/CollapseProjectRule.h | 2 - src/graph/optimizer/rule/CombineFilterRule.h | 2 - .../rule/GeoPredicateIndexScanBaseRule.cpp | 3 - src/graph/optimizer/rule/IndexScanRule.cpp | 2 - .../OptimizeEdgeIndexScanByFilterRule.cpp | 12 - .../rule/OptimizeEdgeIndexScanByFilterRule.h | 2 - .../rule/OptimizeTagIndexScanByFilterRule.cpp | 2 - .../rule/OptimizeTagIndexScanByFilterRule.h | 2 - .../rule/PushFilterDownAggregateRule.h | 2 - .../rule/PushFilterDownGetNbrsRule.cpp | 5 - .../rule/PushFilterDownGetNbrsRule.h | 2 - .../rule/PushFilterDownLeftJoinRule.h | 2 - .../rule/PushFilterDownProjectRule.h | 2 - .../PushLimitDownEdgeIndexFullScanRule.cpp | 6 - .../rule/PushLimitDownEdgeIndexFullScanRule.h | 2 - .../PushLimitDownEdgeIndexPrefixScanRule.cpp | 6 - .../PushLimitDownEdgeIndexPrefixScanRule.h | 2 - .../PushLimitDownEdgeIndexRangeScanRule.cpp | 6 - .../PushLimitDownEdgeIndexRangeScanRule.h | 2 - .../rule/PushLimitDownGetNeighborsRule.cpp | 7 - .../rule/PushLimitDownGetNeighborsRule.h | 2 - .../rule/PushLimitDownIndexScanRule.cpp | 7 - .../rule/PushLimitDownIndexScanRule.h | 2 - .../rule/PushLimitDownProjectRule.cpp | 7 - .../optimizer/rule/PushLimitDownProjectRule.h | 2 - .../PushLimitDownTagIndexFullScanRule.cpp | 7 - .../rule/PushLimitDownTagIndexFullScanRule.h | 2 - .../PushLimitDownTagIndexPrefixScanRule.cpp | 7 - .../PushLimitDownTagIndexPrefixScanRule.h | 2 - .../PushLimitDownTagIndexRangeScanRule.cpp | 7 - .../rule/PushLimitDownTagIndexRangeScanRule.h | 2 - .../PushStepLimitDownGetNeighborsRule.cpp | 7 - .../rule/PushStepLimitDownGetNeighborsRule.h | 2 - .../PushStepSampleDownGetNeighborsRule.cpp | 7 - .../rule/PushStepSampleDownGetNeighborsRule.h | 2 - .../optimizer/rule/RemoveNoopProjectRule.h | 2 - src/graph/optimizer/rule/TopNRule.cpp | 8 - src/graph/optimizer/rule/TopNRule.h | 2 - .../rule/UnionAllIndexScanBaseRule.cpp | 3 - 
src/graph/planner/ngql/PathPlanner.cpp | 22 +- src/graph/validator/FindPathValidator.cpp | 20 +- src/graph/validator/FindPathValidator.h | 2 + src/graph/validator/GetSubgraphValidator.cpp | 3 - .../validator/test/FindPathValidatorTest.cpp | 72 +++-- .../test/GetSubgraphValidatorTest.cpp | 2 +- .../validator/test/QueryValidatorTest.cpp | 5 +- .../validator/test/YieldValidatorTest.cpp | 2 +- src/graph/visitor/FindVisitor.cpp | 9 + src/graph/visitor/FindVisitor.h | 1 + src/parser/TraverseSentences.cpp | 21 +- src/parser/TraverseSentences.h | 5 + src/parser/parser.yy | 18 +- src/parser/scanner.lex | 2 +- src/parser/test/ParserTest.cpp | 4 +- tests/query/stateless/test_keyword.py | 4 +- tests/tck/features/match/Unwind.feature | 12 +- .../tck/features/path/AllPath.IntVid.feature | 139 +++++++--- tests/tck/features/path/AllPath.feature | 139 +++++++--- tests/tck/features/path/NoLoop.IntVid.feature | 114 +++++--- tests/tck/features/path/NoLoop.feature | 114 +++++--- .../features/path/ShortestPath.IntVid.feature | 251 +++++++++++------- tests/tck/features/path/ShortestPath.feature | 251 +++++++++++------- 67 files changed, 797 insertions(+), 586 deletions(-) diff --git a/.linters/cpp/checkKeyword.py b/.linters/cpp/checkKeyword.py index 11ef9ea941d..b16ce0ba2b8 100755 --- a/.linters/cpp/checkKeyword.py +++ b/.linters/cpp/checkKeyword.py @@ -37,6 +37,7 @@ 'KW_WHEN', 'KW_DELETE', 'KW_FIND', + 'KW_PATH', 'KW_LOOKUP', 'KW_ALTER', 'KW_STEPS', diff --git a/src/graph/context/ast/QueryAstContext.h b/src/graph/context/ast/QueryAstContext.h index 683df2e2a73..0f6f71f0029 100644 --- a/src/graph/context/ast/QueryAstContext.h +++ b/src/graph/context/ast/QueryAstContext.h @@ -43,6 +43,7 @@ struct PathContext final : AstContext { StepClause steps; Over over; Expression* filter{nullptr}; + std::vector colNames; /* * find path from A to B OR find path from $-.src to $-.dst diff --git a/src/graph/optimizer/OptGroup.h b/src/graph/optimizer/OptGroup.h index 374a69e998a..d65fc9d9565 100644 --- 
a/src/graph/optimizer/OptGroup.h +++ b/src/graph/optimizer/OptGroup.h @@ -6,10 +6,6 @@ #ifndef GRAPH_OPTIMIZER_OPTGROUP_H_ #define GRAPH_OPTIMIZER_OPTGROUP_H_ -#include -#include -#include - #include "common/base/Status.h" namespace nebula { diff --git a/src/graph/optimizer/Optimizer.h b/src/graph/optimizer/Optimizer.h index b131fb89716..803a06bd417 100644 --- a/src/graph/optimizer/Optimizer.h +++ b/src/graph/optimizer/Optimizer.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_OPTIMIZER_H_ #define GRAPH_OPTIMIZER_OPTIMIZER_H_ -#include - #include "common/base/Base.h" #include "common/base/StatusOr.h" diff --git a/src/graph/optimizer/OptimizerUtils.cpp b/src/graph/optimizer/OptimizerUtils.cpp index 2d3f3afbe9e..679453a61c8 100644 --- a/src/graph/optimizer/OptimizerUtils.cpp +++ b/src/graph/optimizer/OptimizerUtils.cpp @@ -5,21 +5,9 @@ #include "graph/optimizer/OptimizerUtils.h" -#include -#include -#include -#include - #include "common/base/Status.h" #include "common/datatypes/Value.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/PropertyExpression.h" -#include "common/expression/RelationalExpression.h" #include "graph/planner/plan/Query.h" -#include "interface/gen-cpp2/meta_types.h" -#include "interface/gen-cpp2/storage_types.h" using nebula::meta::cpp2::ColumnDef; using nebula::meta::cpp2::IndexItem; diff --git a/src/graph/optimizer/rule/CollapseProjectRule.h b/src/graph/optimizer/rule/CollapseProjectRule.h index b2b7a74d24f..3b57da7cfa0 100644 --- a/src/graph/optimizer/rule/CollapseProjectRule.h +++ b/src/graph/optimizer/rule/CollapseProjectRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_COLLAPSEPROJECTRULE_H_ #define GRAPH_OPTIMIZER_RULE_COLLAPSEPROJECTRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/CombineFilterRule.h b/src/graph/optimizer/rule/CombineFilterRule.h index 
9b69723bfb4..0bc8d78b1f1 100644 --- a/src/graph/optimizer/rule/CombineFilterRule.h +++ b/src/graph/optimizer/rule/CombineFilterRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_COMBINEFILTERRULE_H_ #define GRAPH_OPTIMIZER_RULE_COMBINEFILTERRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp b/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp index 3acad7ba487..210c37539c9 100644 --- a/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp +++ b/src/graph/optimizer/rule/GeoPredicateIndexScanBaseRule.cpp @@ -5,8 +5,6 @@ #include "graph/optimizer/rule/GeoPredicateIndexScanBaseRule.h" -#include "common/expression/Expression.h" -#include "common/expression/LogicalExpression.h" #include "common/geo/GeoIndex.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" @@ -16,7 +14,6 @@ #include "graph/planner/plan/Query.h" #include "graph/planner/plan/Scan.h" #include "graph/util/ExpressionUtils.h" -#include "interface/gen-cpp2/storage_types.h" using nebula::graph::Filter; using nebula::graph::IndexScan; diff --git a/src/graph/optimizer/rule/IndexScanRule.cpp b/src/graph/optimizer/rule/IndexScanRule.cpp index 454e08e2d5a..a7e4ad3601a 100644 --- a/src/graph/optimizer/rule/IndexScanRule.cpp +++ b/src/graph/optimizer/rule/IndexScanRule.cpp @@ -5,8 +5,6 @@ #include "graph/optimizer/rule/IndexScanRule.h" -#include - #include "common/expression/LabelAttributeExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" diff --git a/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.cpp b/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.cpp index 14c98768e70..92964ec0f47 100644 --- a/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.cpp +++ b/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.cpp @@ -5,24 +5,12 @@ #include "graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.h" 
-#include -#include -#include - -#include "common/base/Base.h" -#include "common/base/Status.h" -#include "common/expression/Expression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/PropertyExpression.h" -#include "common/expression/RelationalExpression.h" #include "graph/context/QueryContext.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/optimizer/OptimizerUtils.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Scan.h" -#include "interface/gen-cpp2/meta_types.h" -#include "interface/gen-cpp2/storage_types.h" using nebula::Expression; using nebula::graph::EdgeIndexFullScan; diff --git a/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.h b/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.h index b4588a6ea1c..f4084b35dd8 100644 --- a/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.h +++ b/src/graph/optimizer/rule/OptimizeEdgeIndexScanByFilterRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_OPTIMIZEEDGEINDEXSCANBYFILTERRULE_H_ #define GRAPH_OPTIMIZER_RULE_OPTIMIZEEDGEINDEXSCANBYFILTERRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.cpp b/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.cpp index 685a0e51187..008cce33843 100644 --- a/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.cpp +++ b/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.cpp @@ -5,7 +5,6 @@ #include "graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.h" -#include "common/expression/Expression.h" #include "graph/context/QueryContext.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" @@ -13,7 +12,6 @@ #include "graph/optimizer/rule/IndexScanRule.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Scan.h" -#include "interface/gen-cpp2/storage_types.h" using nebula::graph::Filter; using 
nebula::graph::OptimizerUtils; diff --git a/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.h b/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.h index 92db526b751..6d84386603e 100644 --- a/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.h +++ b/src/graph/optimizer/rule/OptimizeTagIndexScanByFilterRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_OPTIMIZETAGINDEXSCANBYFILTERRULE_H_ #define GRAPH_OPTIMIZER_RULE_OPTIMIZETAGINDEXSCANBYFILTERRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushFilterDownAggregateRule.h b/src/graph/optimizer/rule/PushFilterDownAggregateRule.h index 547e7f04106..80720935296 100644 --- a/src/graph/optimizer/rule/PushFilterDownAggregateRule.h +++ b/src/graph/optimizer/rule/PushFilterDownAggregateRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNAGGREGATERULE_H_ #define GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNAGGREGATERULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.cpp b/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.cpp index 110f183af80..741e9981e12 100644 --- a/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.cpp +++ b/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.cpp @@ -5,12 +5,7 @@ #include "graph/optimizer/rule/PushFilterDownGetNbrsRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" #include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" diff --git a/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.h b/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.h index 208068b1903..4097b39e984 100644 --- 
a/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.h +++ b/src/graph/optimizer/rule/PushFilterDownGetNbrsRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNGETNBRSRULE_H_ #define GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNGETNBRSRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushFilterDownLeftJoinRule.h b/src/graph/optimizer/rule/PushFilterDownLeftJoinRule.h index 6e87e32b4d1..99959da8114 100644 --- a/src/graph/optimizer/rule/PushFilterDownLeftJoinRule.h +++ b/src/graph/optimizer/rule/PushFilterDownLeftJoinRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNLEFTJOINRULE_H_ #define GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNLEFTJOINRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushFilterDownProjectRule.h b/src/graph/optimizer/rule/PushFilterDownProjectRule.h index f0846afe0d0..4df864325f0 100644 --- a/src/graph/optimizer/rule/PushFilterDownProjectRule.h +++ b/src/graph/optimizer/rule/PushFilterDownProjectRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNPROJECTRULE_H_ #define GRAPH_OPTIMIZER_RULE_PUSHFILTERDOWNPROJECTRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.cpp index ffe961cd9c1..81adac8747a 100644 --- a/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.cpp @@ -5,12 +5,6 @@ #include "graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include 
"graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.h b/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.h index ed7f5d11676..4e55ff6eb8a 100644 --- a/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexFullScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.cpp index 02d45fd7b97..c5488e6df7d 100644 --- a/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.cpp @@ -5,12 +5,6 @@ #include "graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.h b/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.h index 91419dd836c..648a80288bb 100644 --- a/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexPrefixScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.cpp index 1762458d8be..a5ca98d103e 100644 --- 
a/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.cpp @@ -5,12 +5,6 @@ #include "graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" diff --git a/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.h b/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.h index c07d79d6077..c7678790474 100644 --- a/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownEdgeIndexRangeScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.cpp b/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.cpp index acd2c050861..ce806640403 100644 --- a/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownGetNeighborsRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::GetNeighbors; using 
nebula::graph::Limit; diff --git a/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.h b/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.h index 06822f6f7c8..3ee9593d3de 100644 --- a/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.h +++ b/src/graph/optimizer/rule/PushLimitDownGetNeighborsRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownIndexScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownIndexScanRule.cpp index 1138efd4c30..f1bf69c6770 100644 --- a/src/graph/optimizer/rule/PushLimitDownIndexScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownIndexScanRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownIndexScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::IndexScan; using nebula::graph::Limit; diff --git a/src/graph/optimizer/rule/PushLimitDownIndexScanRule.h b/src/graph/optimizer/rule/PushLimitDownIndexScanRule.h index 5d2684654fb..56bda90c366 100644 --- a/src/graph/optimizer/rule/PushLimitDownIndexScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownIndexScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownProjectRule.cpp b/src/graph/optimizer/rule/PushLimitDownProjectRule.cpp index 201367738b5..32025332d97 100644 --- a/src/graph/optimizer/rule/PushLimitDownProjectRule.cpp +++ 
b/src/graph/optimizer/rule/PushLimitDownProjectRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownProjectRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::Limit; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/PushLimitDownProjectRule.h b/src/graph/optimizer/rule/PushLimitDownProjectRule.h index dba00e26192..bef89fcbd51 100644 --- a/src/graph/optimizer/rule/PushLimitDownProjectRule.h +++ b/src/graph/optimizer/rule/PushLimitDownProjectRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.cpp index 1a633b13440..6a34a7b1bfe 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Scan.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using 
nebula::graph::Limit; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.h b/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.h index 27aa4780662..b3c5297d1c7 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexFullScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.cpp index a462e2296af..4e2fbd8b02d 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Scan.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::Limit; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.h b/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.h index d4ce7bf66ae..1c5064e2243 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexPrefixScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.cpp b/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.cpp index 
12face68cee..209deac0482 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.cpp +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Scan.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::Limit; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.h b/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.h index 38f0d52a1fe..2e42c47298b 100644 --- a/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.h +++ b/src/graph/optimizer/rule/PushLimitDownTagIndexRangeScanRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.cpp b/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.cpp index 5525f81d27a..460725bc90f 100644 --- a/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.cpp +++ b/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include 
"graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::GetNeighbors; using nebula::graph::Limit; diff --git a/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.h b/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.h index 938137ed1e4..657155590cb 100644 --- a/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.h +++ b/src/graph/optimizer/rule/PushStepLimitDownGetNeighborsRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.cpp b/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.cpp index fa38e865a11..30d9afb4c74 100644 --- a/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.cpp +++ b/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.cpp @@ -5,17 +5,10 @@ #include "graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.h" -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::GetNeighbors; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.h b/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.h index 714d7a26afa..4ea8e8a6f96 100644 --- a/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.h +++ b/src/graph/optimizer/rule/PushStepSampleDownGetNeighborsRule.h @@ -5,8 +5,6 @@ #pragma once -#include - #include 
"graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/RemoveNoopProjectRule.h b/src/graph/optimizer/rule/RemoveNoopProjectRule.h index f1521613e2b..c8d96d6cf2a 100644 --- a/src/graph/optimizer/rule/RemoveNoopProjectRule.h +++ b/src/graph/optimizer/rule/RemoveNoopProjectRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_REMOVENOOPPROJECTRULE_H_ #define GRAPH_OPTIMIZER_RULE_REMOVENOOPPROJECTRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/TopNRule.cpp b/src/graph/optimizer/rule/TopNRule.cpp index 6db4f6c3ab0..194ec2788bd 100644 --- a/src/graph/optimizer/rule/TopNRule.cpp +++ b/src/graph/optimizer/rule/TopNRule.cpp @@ -4,18 +4,10 @@ */ #include "graph/optimizer/rule/TopNRule.h" - -#include "common/expression/BinaryExpression.h" -#include "common/expression/ConstantExpression.h" -#include "common/expression/Expression.h" -#include "common/expression/FunctionCallExpression.h" -#include "common/expression/LogicalExpression.h" -#include "common/expression/UnaryExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" #include "graph/planner/plan/Query.h" -#include "graph/visitor/ExtractFilterExprVisitor.h" using nebula::graph::Limit; using nebula::graph::PlanNode; diff --git a/src/graph/optimizer/rule/TopNRule.h b/src/graph/optimizer/rule/TopNRule.h index 879c1a2fd59..4e68ac89ee2 100644 --- a/src/graph/optimizer/rule/TopNRule.h +++ b/src/graph/optimizer/rule/TopNRule.h @@ -6,8 +6,6 @@ #ifndef GRAPH_OPTIMIZER_RULE_TOPNRULE_H_ #define GRAPH_OPTIMIZER_RULE_TOPNRULE_H_ -#include - #include "graph/optimizer/OptRule.h" namespace nebula { diff --git a/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp b/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp index bad83bb4bbc..2c3904b6df8 100644 --- a/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp +++ 
b/src/graph/optimizer/rule/UnionAllIndexScanBaseRule.cpp @@ -5,8 +5,6 @@ #include "graph/optimizer/rule/UnionAllIndexScanBaseRule.h" -#include "common/expression/Expression.h" -#include "common/expression/LogicalExpression.h" #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/optimizer/OptRule.h" @@ -15,7 +13,6 @@ #include "graph/planner/plan/Query.h" #include "graph/planner/plan/Scan.h" #include "graph/util/ExpressionUtils.h" -#include "interface/gen-cpp2/storage_types.h" using nebula::graph::Filter; using nebula::graph::IndexScan; diff --git a/src/graph/planner/ngql/PathPlanner.cpp b/src/graph/planner/ngql/PathPlanner.cpp index f6f7843788c..5b72128b711 100644 --- a/src/graph/planner/ngql/PathPlanner.cpp +++ b/src/graph/planner/ngql/PathPlanner.cpp @@ -271,7 +271,7 @@ SubPlan PathPlanner::singlePairPlan(PlanNode* dep) { auto* dc = DataCollect::make(qctx, DataCollect::DCKind::kBFSShortest); dc->setInputVars({conjunct->outputVar()}); dc->addDep(loop); - dc->setColNames({"path"}); + dc->setColNames(pathCtx_->colNames); SubPlan subPlan; subPlan.root = dc; @@ -321,7 +321,7 @@ SubPlan PathPlanner::allPairPlan(PlanNode* dep) { auto* dc = DataCollect::make(qctx, DataCollect::DCKind::kAllPaths); dc->addDep(loop); dc->setInputVars({conjunct->outputVar()}); - dc->setColNames({"path"}); + dc->setColNames(pathCtx_->colNames); SubPlan subPlan; subPlan.root = dc; @@ -374,7 +374,7 @@ SubPlan PathPlanner::multiPairPlan(PlanNode* dep) { auto* dc = DataCollect::make(qctx, DataCollect::DCKind::kMultiplePairShortest); dc->addDep(loop); dc->setInputVars({conjunct->outputVar()}); - dc->setColNames({"path"}); + dc->setColNames(pathCtx_->colNames); SubPlan subPlan; subPlan.root = dc; @@ -503,18 +503,20 @@ PlanNode* PathPlanner::buildPathProp(PlanNode* dep) { dc->addDep(vertexPlan); dc->addDep(edgePlan); dc->setInputVars({vertexPlan->outputVar(), edgePlan->outputVar(), dep->outputVar()}); - dc->setColNames({"path"}); + 
dc->setColNames(std::move(pathCtx_->colNames)); return dc; } StatusOr PathPlanner::transform(AstContext* astCtx) { pathCtx_ = static_cast(astCtx); + auto qctx = pathCtx_->qctx; + auto& from = pathCtx_->from; + auto& to = pathCtx_->to; + buildStart(from, pathCtx_->fromVidsVar, false); + buildStart(to, pathCtx_->toVidsVar, true); - buildStart(pathCtx_->from, pathCtx_->fromVidsVar, false); - buildStart(pathCtx_->to, pathCtx_->toVidsVar, true); - - auto* startNode = StartNode::make(pathCtx_->qctx); - auto* pt = PassThroughNode::make(pathCtx_->qctx, startNode); + auto* startNode = StartNode::make(qctx); + auto* pt = PassThroughNode::make(qctx, startNode); SubPlan subPlan; do { @@ -522,7 +524,7 @@ StatusOr PathPlanner::transform(AstContext* astCtx) { subPlan = allPairPlan(pt); break; } - if (pathCtx_->from.vids.size() == 1 && pathCtx_->to.vids.size() == 1) { + if (from.vids.size() == 1 && to.vids.size() == 1) { subPlan = singlePairPlan(pt); break; } diff --git a/src/graph/validator/FindPathValidator.cpp b/src/graph/validator/FindPathValidator.cpp index 97409ead1b5..8fcf186f2df 100644 --- a/src/graph/validator/FindPathValidator.cpp +++ b/src/graph/validator/FindPathValidator.cpp @@ -5,7 +5,6 @@ #include "graph/validator/FindPathValidator.h" -#include "common/expression/VariableExpression.h" #include "graph/planner/plan/Algo.h" #include "graph/planner/plan/Logic.h" #include "graph/util/ValidateUtil.h" @@ -25,8 +24,8 @@ Status FindPathValidator::validateImpl() { NG_RETURN_IF_ERROR(ValidateUtil::validateOver(qctx_, fpSentence->over(), pathCtx_->over)); NG_RETURN_IF_ERROR(validateWhere(fpSentence->where())); NG_RETURN_IF_ERROR(ValidateUtil::validateStep(fpSentence->step(), pathCtx_->steps)); + NG_RETURN_IF_ERROR(validateYield(fpSentence->yield())); - outputs_.emplace_back("path", Value::Type::PATH); return Status::OK(); } @@ -63,5 +62,22 @@ Status FindPathValidator::validateWhere(WhereClause* where) { return Status::OK(); } +Status 
FindPathValidator::validateYield(YieldClause* yield) { + if (yield == nullptr) { + return Status::SemanticError("Missing yield clause."); + } + if (yield->columns().size() != 1) { + return Status::SemanticError("Only support yield path"); + } + auto col = yield->columns().front(); + if (col->expr()->kind() != Expression::Kind::kLabel || col->expr()->toString() != "PATH") { + return Status::SemanticError("Illegal yield clauses `%s'. only support yield path", + col->toString().c_str()); + } + outputs_.emplace_back(col->name(), Value::Type::PATH); + pathCtx_->colNames = getOutColNames(); + return Status::OK(); +} + } // namespace graph } // namespace nebula diff --git a/src/graph/validator/FindPathValidator.h b/src/graph/validator/FindPathValidator.h index 3142f268463..95a5f64a2ad 100644 --- a/src/graph/validator/FindPathValidator.h +++ b/src/graph/validator/FindPathValidator.h @@ -23,6 +23,8 @@ class FindPathValidator final : public Validator { Status validateWhere(WhereClause* where); + Status validateYield(YieldClause* yield); + private: std::unique_ptr pathCtx_; }; diff --git a/src/graph/validator/GetSubgraphValidator.cpp b/src/graph/validator/GetSubgraphValidator.cpp index df02bef7485..c0d9f4a5576 100644 --- a/src/graph/validator/GetSubgraphValidator.cpp +++ b/src/graph/validator/GetSubgraphValidator.cpp @@ -5,9 +5,6 @@ #include "graph/validator/GetSubgraphValidator.h" -#include - -#include "graph/context/QueryExpressionContext.h" #include "graph/planner/plan/Logic.h" #include "graph/planner/plan/Query.h" #include "graph/util/ValidateUtil.h" diff --git a/src/graph/validator/test/FindPathValidatorTest.cpp b/src/graph/validator/test/FindPathValidatorTest.cpp index 38e2e4835a9..03ba19fc4d5 100644 --- a/src/graph/validator/test/FindPathValidatorTest.cpp +++ b/src/graph/validator/test/FindPathValidatorTest.cpp @@ -17,9 +17,46 @@ class FindPathValidatorTest : public ValidatorTestBase { using PK = nebula::graph::PlanNode::Kind; +TEST_F(FindPathValidatorTest, 
invalidYield) { + { + std::string query = "FIND SHORTEST PATH FROM \"Tim\" TO \"Tony\" OVER *"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), "SemanticError: Missing yield clause."); + } + { + std::string query = "FIND SHORTEST PATH FROM \"Tim\" TO \"Tony\" OVER * YIELD vertex"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SyntaxError: please add alias when using `vertex'. near `vertex'"); + } + { + std::string query = + "FIND ALL PATH WITH PROP FROM \"Tim\" TO \"Tony\" OVER like YIELD edge as e"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SemanticError: Illegal yield clauses `EDGE AS e'. only support yield path"); + } + { + std::string query = + "FIND NOLOOP PATH WITH PROP FROM \"Tim\" TO \"Yao\" OVER teammate YIELD path"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SyntaxError: please add alias when using `path'. near `path'"); + } + { + std::string query = + "FIND NOLOOP PATH WITH PROP FROM \"Tim\" TO \"Yao\" OVER * YIELD " + "$$.player.name"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), + "SemanticError: Illegal yield clauses `$$.player.name'. 
only support yield path"); + } +} + TEST_F(FindPathValidatorTest, SinglePairPath) { { - std::string query = "FIND SHORTEST PATH FROM \"1\" TO \"2\" OVER like UPTO 5 STEPS"; + std::string query = + "FIND SHORTEST PATH FROM \"1\" TO \"2\" OVER like UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -35,7 +72,8 @@ TEST_F(FindPathValidatorTest, SinglePairPath) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "FIND SHORTEST PATH FROM \"1\" TO \"2\" OVER like, serve UPTO 5 STEPS"; + std::string query = + "FIND SHORTEST PATH FROM \"1\" TO \"2\" OVER like, serve UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -54,7 +92,8 @@ TEST_F(FindPathValidatorTest, SinglePairPath) { TEST_F(FindPathValidatorTest, MultiPairPath) { { - std::string query = "FIND SHORTEST PATH FROM \"1\" TO \"2\",\"3\" OVER like UPTO 5 STEPS"; + std::string query = + "FIND SHORTEST PATH FROM \"1\" TO \"2\",\"3\" OVER like UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -75,7 +114,7 @@ TEST_F(FindPathValidatorTest, MultiPairPath) { { std::string query = "FIND SHORTEST PATH FROM \"1\",\"2\" TO \"3\",\"4\" OVER like UPTO 5 " - "STEPS"; + "STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -97,7 +136,7 @@ TEST_F(FindPathValidatorTest, MultiPairPath) { TEST_F(FindPathValidatorTest, ALLPath) { { - std::string query = "FIND ALL PATH FROM \"1\" TO \"2\" OVER like UPTO 5 STEPS"; + std::string query = "FIND ALL PATH FROM \"1\" TO \"2\" OVER like UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -115,7 +154,8 @@ TEST_F(FindPathValidatorTest, ALLPath) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "FIND ALL PATH FROM \"1\" TO \"2\",\"3\" OVER like UPTO 5 STEPS"; + std::string query = + "FIND ALL PATH FROM \"1\" TO \"2\",\"3\" OVER like UPTO 5 STEPS YIELD path as p"; std::vector expected = { 
PK::kDataCollect, PK::kLoop, @@ -139,7 +179,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src, like._dst AS dst " " | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 " - "STEPS"; + "STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -166,7 +206,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src, like._dst AS dst " - " | FIND ALL PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS"; + " | FIND ALL PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -193,7 +233,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src, like._dst AS dst " " | FIND SHORTEST PATH FROM \"2\" TO $-.dst OVER like, serve UPTO 5 " - "STEPS"; + "STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -219,7 +259,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src, like._dst AS dst " " | FIND SHORTEST PATH FROM $-.src TO \"2\" OVER like, serve UPTO 5 " - "STEPS"; + "STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -245,7 +285,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { std::string query = "$a = GO FROM \"1\" OVER like yield like._src AS src; " "GO FROM \"2\" OVER like yield like._src AS src, like._dst AS dst " - " | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS"; + " | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -275,7 +315,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { std::string query = "YIELD \"1\" AS src, \"2\" AS dst" " | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 " - "STEPS"; + "STEPS YIELD path as p"; 
std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -301,7 +341,7 @@ TEST_F(FindPathValidatorTest, RunTimePath) { { std::string query = "YIELD \"1\" AS src, \"2\" AS dst" - " | FIND ALL PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS"; + " | FIND ALL PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -329,7 +369,7 @@ TEST_F(FindPathValidatorTest, PathWithFilter) { { std::string query = "FIND ALL PATH FROM \"1\" TO \"2\" OVER like WHERE like.likeness > 30 " - "UPTO 5 STEPS"; + "UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -351,7 +391,7 @@ TEST_F(FindPathValidatorTest, PathWithFilter) { { std::string query = "FIND SHORTEST PATH FROM \"1\" TO \"2\" OVER like WHERE like.likeness " - "> 30 UPTO 5 STEPS"; + "> 30 UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, @@ -371,7 +411,7 @@ TEST_F(FindPathValidatorTest, PathWithFilter) { { std::string query = "FIND SHORTEST PATH FROM \"1\" TO \"2\", \"3\" OVER like WHERE " - "like.likeness > 30 UPTO 5 STEPS"; + "like.likeness > 30 UPTO 5 STEPS YIELD path as p"; std::vector expected = { PK::kDataCollect, PK::kLoop, diff --git a/src/graph/validator/test/GetSubgraphValidatorTest.cpp b/src/graph/validator/test/GetSubgraphValidatorTest.cpp index 3df19562e7c..61ef6d9814e 100644 --- a/src/graph/validator/test/GetSubgraphValidatorTest.cpp +++ b/src/graph/validator/test/GetSubgraphValidatorTest.cpp @@ -190,7 +190,7 @@ TEST_F(GetSubgraphValidatorTest, invalidYield) { std::string query = "GET SUBGRAPH WITH PROP FROM \"Tim Duncan\" YIELD path"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), - "SemanticError: Get Subgraph only support YIELD vertices OR edges"); + "SyntaxError: please add alias when using `path'. 
near `path'"); } { std::string query = "GET SUBGRAPH WITH PROP FROM \"Tim Duncan\" YIELD 123"; diff --git a/src/graph/validator/test/QueryValidatorTest.cpp b/src/graph/validator/test/QueryValidatorTest.cpp index 6af43b84454..fe344e6f2c8 100644 --- a/src/graph/validator/test/QueryValidatorTest.cpp +++ b/src/graph/validator/test/QueryValidatorTest.cpp @@ -953,9 +953,10 @@ TEST_F(QueryValidatorTest, GoInvalid) { "SemanticError: `VERTEX AS v' is not support in go sentence."); } { - std::string query = "GO FROM \"Tim\" OVER * YIELD path as p"; + std::string query = "GO FROM \"Tim\" OVER * YIELD path"; auto result = checkResult(query); - EXPECT_EQ(std::string(result.message()), "SemanticError: Invalid label identifiers: path"); + EXPECT_EQ(std::string(result.message()), + "SyntaxError: please add alias when using `path'. near `path'"); } { std::string query = "GO FROM \"Tim\" OVER * YIELD $$"; diff --git a/src/graph/validator/test/YieldValidatorTest.cpp b/src/graph/validator/test/YieldValidatorTest.cpp index 1dec793100b..3ccb89d0bd7 100644 --- a/src/graph/validator/test/YieldValidatorTest.cpp +++ b/src/graph/validator/test/YieldValidatorTest.cpp @@ -223,7 +223,7 @@ TEST_F(YieldValidatorTest, TypeCastTest) { { std::string query = "YIELD (PATH)true"; auto result = checkResult(query); - EXPECT_EQ(std::string(result.message()), "SyntaxError: syntax error near `true'"); + EXPECT_EQ(std::string(result.message()), "SyntaxError: syntax error near `PATH'"); } { std::string query = "YIELD (NOEXIST)true"; diff --git a/src/graph/visitor/FindVisitor.cpp b/src/graph/visitor/FindVisitor.cpp index ce6d7798e4f..1c31af18c97 100644 --- a/src/graph/visitor/FindVisitor.cpp +++ b/src/graph/visitor/FindVisitor.cpp @@ -173,6 +173,15 @@ void FindVisitor::visit(EdgeExpression* expr) { findInCurrentExpr(expr); } void FindVisitor::visit(ColumnExpression* expr) { findInCurrentExpr(expr); } +void FindVisitor::visit(PathBuildExpression* expr) { + findInCurrentExpr(expr); + if (!needFindAll_ && 
!foundExprs_.empty()) return; + for (const auto& item : expr->items()) { + item->accept(this); + if (!needFindAll_ && !foundExprs_.empty()) return; + } +} + void FindVisitor::visit(SubscriptRangeExpression* expr) { findInCurrentExpr(expr); if (!needFindAll_ && !foundExprs_.empty()) return; diff --git a/src/graph/visitor/FindVisitor.h b/src/graph/visitor/FindVisitor.h index 8ade58a40fe..32d6446f891 100644 --- a/src/graph/visitor/FindVisitor.h +++ b/src/graph/visitor/FindVisitor.h @@ -62,6 +62,7 @@ class FindVisitor final : public ExprVisitorImpl { void visit(LabelAttributeExpression* expr) override; void visit(VertexExpression* expr) override; void visit(EdgeExpression* expr) override; + void visit(PathBuildExpression* expr) override; void visit(ColumnExpression* expr) override; void visit(ListComprehensionExpression* expr) override; void visit(SubscriptRangeExpression* expr) override; diff --git a/src/parser/TraverseSentences.cpp b/src/parser/TraverseSentences.cpp index b2409a6649c..037faf56fa4 100644 --- a/src/parser/TraverseSentences.cpp +++ b/src/parser/TraverseSentences.cpp @@ -181,35 +181,38 @@ std::string GroupBySentence::toString() const { std::string FindPathSentence::toString() const { std::string buf; buf.reserve(256); - buf += "FIND "; + buf += "FIND"; if (noLoop_) { - buf += "NOLOOP PATH "; + buf += " NOLOOP PATH"; } else if (isShortest_) { - buf += "SHORTEST PATH "; + buf += " SHORTEST PATH"; } else { - buf += "ALL PATH "; + buf += " ALL PATH"; } if (from_ != nullptr) { - buf += from_->toString(); buf += " "; + buf += from_->toString(); } if (to_ != nullptr) { - buf += to_->toString(); buf += " "; + buf += to_->toString(); } if (over_ != nullptr) { - buf += over_->toString(); buf += " "; + buf += over_->toString(); } if (where_ != nullptr) { - buf += where_->toString(); buf += " "; + buf += where_->toString(); } if (step_ != nullptr) { - buf += "UPTO "; + buf += " UPTO "; buf += step_->toString(); + } + if (yield_ != nullptr) { buf += " "; + buf += 
yield_->toString(); } return buf; } diff --git a/src/parser/TraverseSentences.h b/src/parser/TraverseSentences.h index 8d1ce13e42d..d0c416d1913 100644 --- a/src/parser/TraverseSentences.h +++ b/src/parser/TraverseSentences.h @@ -320,6 +320,8 @@ class FindPathSentence final : public Sentence { void setWhere(WhereClause* clause) { where_.reset(clause); } + void setYield(YieldClause* yield) { yield_.reset(yield); } + FromClause* from() const { return from_.get(); } ToClause* to() const { return to_.get(); } @@ -330,6 +332,8 @@ class FindPathSentence final : public Sentence { WhereClause* where() const { return where_.get(); } + YieldClause* yield() const { return yield_.get(); } + bool isShortest() const { return isShortest_; } bool withProp() const { return withProp_; } @@ -347,6 +351,7 @@ class FindPathSentence final : public Sentence { std::unique_ptr over_; std::unique_ptr step_; std::unique_ptr where_; + std::unique_ptr yield_; }; class LimitSentence final : public Sentence { diff --git a/src/parser/parser.yy b/src/parser/parser.yy index a0fac7dae9a..135fa4335b4 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -496,7 +496,6 @@ unreserved_keyword | KW_BOTH { $$ = new std::string("both"); } | KW_OUT { $$ = new std::string("out"); } | KW_SUBGRAPH { $$ = new std::string("subgraph"); } - | KW_PATH { $$ = new std::string("path"); } | KW_THEN { $$ = new std::string("then"); } | KW_ELSE { $$ = new std::string("else"); } | KW_END { $$ = new std::string("end"); } @@ -1438,6 +1437,14 @@ yield_column $$ = new YieldColumn(LabelExpression::make(qctx->objPool(), "EDGES"), *$3); delete $3; } + | KW_PATH { + $$ = nullptr; + throw nebula::GraphParser::syntax_error(@1, "please add alias when using `path'."); + } + | KW_PATH KW_AS name_label { + $$ = new YieldColumn(LabelExpression::make(qctx->objPool(), "PATH"), *$3); + delete $3; + } | expression { $$ = new YieldColumn($1); } @@ -2077,31 +2084,34 @@ fetch_sentence ; find_path_sentence - : KW_FIND KW_ALL KW_PATH 
opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { + : KW_FIND KW_ALL KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause yield_clause { auto *s = new FindPathSentence(false, $4, false); s->setFrom($5); s->setTo($6); s->setOver($7); s->setWhere($8); s->setStep($9); + s->setYield($10); $$ = s; } - | KW_FIND KW_SHORTEST KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { + | KW_FIND KW_SHORTEST KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause yield_clause { auto *s = new FindPathSentence(true, $4, false); s->setFrom($5); s->setTo($6); s->setOver($7); s->setWhere($8); s->setStep($9); + s->setYield($10); $$ = s; } - | KW_FIND KW_NOLOOP KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause { + | KW_FIND KW_NOLOOP KW_PATH opt_with_properties from_clause to_clause over_clause where_clause find_path_upto_clause yield_clause { auto *s = new FindPathSentence(false, $4, true); s->setFrom($5); s->setTo($6); s->setOver($7); s->setWhere($8); s->setStep($9); + s->setYield($10); $$ = s; } ; diff --git a/src/parser/scanner.lex b/src/parser/scanner.lex index 2b209bee00d..ffe942cfec8 100644 --- a/src/parser/scanner.lex +++ b/src/parser/scanner.lex @@ -73,6 +73,7 @@ IP_OCTET ([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]) "WHEN" { return TokenType::KW_WHEN; } "DELETE" { return TokenType::KW_DELETE; } "FIND" { return TokenType::KW_FIND; } +"PATH" { return TokenType::KW_PATH; } "LOOKUP" { return TokenType::KW_LOOKUP; } "ALTER" { return TokenType::KW_ALTER; } "STEPS" { return TokenType::KW_STEPS; } @@ -205,7 +206,6 @@ IP_OCTET ([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]) "STORAGE" { return TokenType::KW_STORAGE; } "SHORTEST" { return TokenType::KW_SHORTEST; } "NOLOOP" { return TokenType::KW_NOLOOP; } -"PATH" { return TokenType::KW_PATH; } "OUT" { return 
TokenType::KW_OUT; } "BOTH" { return TokenType::KW_BOTH; } "SUBGRAPH" { return TokenType::KW_SUBGRAPH; } diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp index ba3fd9ca79a..2fbbc03a56f 100644 --- a/src/parser/test/ParserTest.cpp +++ b/src/parser/test/ParserTest.cpp @@ -1659,7 +1659,7 @@ TEST_F(ParserTest, UnreservedKeywords) { std::string query = "CREATE TAG tag1(space string, spaces string, " "email string, password string, roles string, uuid int, " - "path string, variables string, leader string, data string)"; + "paths string, variables string, leader string, data string)"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } @@ -2506,7 +2506,7 @@ TEST_F(ParserTest, Match) { ASSERT_TRUE(result.ok()) << result.status(); } { - std::string query = "MATCH p = (a) -[m:like*..2]- (b) RETURN p as Path"; + std::string query = "MATCH p = (a) -[m:like*..2]- (b) RETURN p as PathA"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } diff --git a/tests/query/stateless/test_keyword.py b/tests/query/stateless/test_keyword.py index 74f8fbabb4a..0c970615f69 100644 --- a/tests/query/stateless/test_keyword.py +++ b/tests/query/stateless/test_keyword.py @@ -124,7 +124,7 @@ def test_keywords1(self): resp = self.execute(cmd) self.check_resp_succeeded(resp) - cmd = 'create tag x23 (path string)' + cmd = 'create tag x23 (paths string)' resp = self.execute(cmd) self.check_resp_succeeded(resp) @@ -280,7 +280,7 @@ def test_keywords1(self): resp = self.execute(cmd) self.check_resp_succeeded(resp) - cmd = 'create tag x233 (PATH string)' + cmd = 'create tag x233 (PATHS string)' resp = self.execute(cmd) self.check_resp_succeeded(resp) diff --git a/tests/tck/features/match/Unwind.feature b/tests/tck/features/match/Unwind.feature index 5648eb25dad..718a18199ac 100644 --- a/tests/tck/features/match/Unwind.feature +++ b/tests/tck/features/match/Unwind.feature @@ -119,14 +119,14 @@ Feature: Unwind clause Scenario: unwind 
match with When executing query: """ - MATCH path=(x:player{name: "Tim Duncan"})-[:like*..2]->(y) - UNWIND nodes(path) as n - WITH path, size(collect(distinct n)) AS testLength - WHERE testLength == length(path) + 1 - RETURN path + MATCH p = (x:player{name: "Tim Duncan"})-[:like*..2]->(y) + UNWIND nodes(p) as n + WITH p, size(collect(distinct n)) AS testLength + WHERE testLength == length(p) + 1 + RETURN p """ Then the result should be, in any order: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | diff --git a/tests/tck/features/path/AllPath.IntVid.feature b/tests/tck/features/path/AllPath.IntVid.feature index 3f20e83bd1c..18ac5bc4d1c 100644 --- a/tests/tck/features/path/AllPath.IntVid.feature +++ b/tests/tck/features/path/AllPath.IntVid.feature @@ -7,10 +7,10 @@ Feature: Integer Vid All Path Given a graph with space named "nba_int_vid" When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tim Duncan") OVER * UPTO 2 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tim Duncan") OVER * UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tim Duncan")-[:teammate]->("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:teammate]->("Tim Duncan")> 
| @@ -23,19 +23,19 @@ Feature: Integer Vid All Path | <("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")-[:like]->("Tim Duncan")> | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Manu Ginobili") OVER like UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Manu Ginobili") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -44,20 +44,20 @@ Feature: Integer Vid All Path | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus 
Aldridge")-[:like]->("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | @@ -73,11 +73,11 @@ Feature: Integer Vid All Path Given a graph with space named "nba_int_vid" When executing query: """ - GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst - | FIND ALL PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS + GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst | + FIND ALL PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -88,10 +88,10 @@ Feature: Integer Vid All Path When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS + FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -101,11 +101,11 @@ Feature: Integer Vid All Path When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), 
hash("Spurs") OVER like,serve UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 3 + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:serve]->("Spurs")> | @@ -115,11 +115,11 @@ Feature: Integer Vid All Path When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker")-[:like@0]->("LaMarcus Aldridge")-[:like@0]->("Tim Duncan")> | | <("Tony Parker")-[:like@0]->("LaMarcus Aldridge")-[:like@0]->("Tony Parker")-[:like@0]->("Tim Duncan")> | | <("Tony Parker")-[:like@0]->("Manu Ginobili")-[:like@0]->("Tim Duncan")> | @@ -130,16 +130,16 @@ Feature: Integer Vid All Path Given a graph with space named "nba_int_vid" When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax 
comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Manu Ginobili")<-[:like]-("Tony Parker")> | @@ -147,10 +147,10 @@ Feature: Integer Vid All Path | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")> | @@ -167,10 +167,10 @@ Feature: Integer Vid All Path Given a graph with space named "nba_int_vid" When executing query: """ - FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 3 STEPS + FIND ALL PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Marco Belinelli")-[:like]->("Tony Parker")> | @@ -202,10 +202,10 @@ Feature: Integer Vid All Path Given a graph with space named "nba_int_vid" When executing query: """ - FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: 
"psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})-[:like@0 {likeness: 90}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | @@ -215,18 +215,18 @@ Feature: Integer Vid All Path When executing query: """ FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Yao Ming") OVER * BIDIRECT - WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS + WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM hash("Tony Parker") TO hash("Yao Ming") OVER * BIDIRECT - WHERE teammate.start_year > 2000 OR (like.likeness is not EMPTY AND like.likeness >= 80) UPTO 3 STEPS + WHERE teammate.start_year > 2000 OR (like.likeness is 
not EMPTY AND like.likeness >= 80) UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | @@ -234,10 +234,10 @@ Feature: Integer Vid All Path When executing query: """ FIND ALL PATH WITH PROP FROM hash("Yao Ming") TO hash("Danny Green") OVER * BIDIRECT - WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | | <("Yao Ming" 
:player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | @@ -254,18 +254,18 @@ Feature: Integer Vid All Path Then the execution should be successful When executing query: """ - FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 2 steps + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 2 steps YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 99}]->("Tim Parker")-[:like@0 {likeness: 90}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 2 steps + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 2 steps YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 
{likeness: 95}]-("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 99}]->("Tim Parker")-[:like@0 {likeness: 90}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | @@ -277,3 +277,60 @@ Feature: Integer Vid All Path | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})<-[:like@0 {likeness: 95}]-("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Boris Diaw" :player{age: 36, name: "Boris Diaw"})-[:like@0 {likeness: 80}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | Then drop the used space + + Scenario: Integer Vid ALL PATH YIELD PATH + Given a graph with space named "nba_int_vid" + When executing query: + """ + FIND ALL PATH WITH PROP FROM hash("Yao Ming") TO hash("Danny Green") OVER * BIDIRECT + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p | + YIELD startnode($-.p) as startnode + """ + Then the result should be, in any order, with relax comparison: + | startnode | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + When executing query: + """ + FIND ALL PATH WITH PROP FROM hash("Yao Ming") TO hash("Danny Green") OVER * BIDIRECT + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p | + YIELD endnode($-.p) as endnode 
+ """ + Then the result should be, in any order, with relax comparison: + | endnode | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + When executing query: + """ + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p | + YIELD length($-.p) as length + """ + Then the result should be, in any order, with relax comparison: + | length | + | 1 | + | 3 | + | 3 | + When executing query: + """ + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}]] | + | [[:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}], [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}], [:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}]] | + | [[:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}], [:like "Tony Parker"->"LaMarcus Aldridge" @0 {likeness: 90}], [:like "LaMarcus Aldridge"->"Tony Parker" @0 {likeness: 75}]] | + When executing query: + """ + FIND ALL PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | + | [("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"}), ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | + | [("Tim Duncan" 
:bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | diff --git a/tests/tck/features/path/AllPath.feature b/tests/tck/features/path/AllPath.feature index a73a28fc1f7..53efe544b1f 100644 --- a/tests/tck/features/path/AllPath.feature +++ b/tests/tck/features/path/AllPath.feature @@ -7,10 +7,10 @@ Feature: All Path Given a graph with space named "nba" When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tim Duncan" OVER * UPTO 2 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tim Duncan" OVER * UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tim Duncan")-[:teammate]->("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:teammate]->("Tim Duncan")> | @@ -23,19 +23,19 @@ Feature: All Path | <("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")-[:like]->("Tim Duncan")> | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker", "Manu Ginobili" OVER like UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker", "Manu Ginobili" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim 
Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -44,20 +44,20 @@ Feature: All Path | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | @@ -73,11 +73,11 @@ Feature: All Path Given a graph with space named "nba" When executing query: """ - GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst - | FIND ALL PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS + GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst | + FIND ALL PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu 
Ginobili")-[:like]->("Tim Duncan")> | @@ -88,10 +88,10 @@ Feature: All Path When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS + FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -104,22 +104,22 @@ Feature: All Path Given a graph with space named "nba" When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 3 + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND ALL PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Manu Ginobili")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tim Duncan")> | @@ -130,16 +130,16 @@ Feature: All Path Given a graph with space named "nba" When executing query: """ - FIND ALL PATH FROM 
"Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Manu Ginobili")<-[:like]-("Tony Parker")> | @@ -147,10 +147,10 @@ Feature: All Path | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like REVERSELY UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")> | @@ -167,10 +167,10 @@ Feature: All Path Given a graph with space named "nba" When executing query: """ - FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT UPTO 3 STEPS + FIND ALL PATH FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Marco Belinelli")-[:like]->("Tony Parker")> | @@ -202,10 +202,10 @@ Feature: All Path Given a graph with 
space named "nba" When executing query: """ - FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})-[:like@0 {likeness: 90}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | @@ -215,18 +215,18 @@ Feature: All Path When executing query: """ FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Yao Ming" OVER * BIDIRECT - WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS + WHERE (like.likeness >= 80 and like.likeness <= 90) OR (teammate.start_year is not EMPTY and teammate.start_year > 2001) UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille 
O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | When executing query: """ FIND ALL PATH WITH PROP FROM "Tony Parker" TO "Yao Ming" OVER * BIDIRECT - WHERE teammate.start_year > 2000 OR (like.likeness is not EMPTY AND like.likeness >= 80) UPTO 3 STEPS + WHERE teammate.start_year > 2000 OR (like.likeness is not EMPTY AND like.likeness >= 80) UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2001}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})<-[:like@0 {likeness: 90}]-("Yao Ming" :player{age: 38, name: "Yao Ming"})> | @@ -234,10 +234,10 @@ Feature: All Path When executing query: """ FIND ALL PATH WITH PROP FROM "Yao Ming" TO "Danny Green" OVER * BIDIRECT - WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" :player{age: 
38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2010, start_year: 2009}]->("Cavaliers" :team{name: "Cavaliers"})<-[:serve@0 {end_year: 2010, start_year: 2009}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:teammate@0 {end_year: 2016, start_year: 2010}]->("Danny Green" :player{age: 31, name: "Danny Green"})> | | <("Yao Ming" :player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2000, start_year: 1997}]->("Raptors" :team{name: "Raptors"})<-[:serve@0 {end_year: 2019, start_year: 2018}]-("Danny Green" :player{age: 31, name: "Danny Green"})> | @@ -254,18 +254,18 @@ Feature: All Path Then the execution should be successful When executing query: """ - FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 2 steps + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 2 steps YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 99}]->("Tim Parker")-[:like@0 {likeness: 90}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT UPTO 2 steps + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER 
like BIDIRECT UPTO 2 steps YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 95}]-("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 99}]->("Tim Parker")-[:like@0 {likeness: 90}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | @@ -277,3 +277,60 @@ Feature: All Path | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})<-[:like@0 {likeness: 95}]-("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 80}]-("Boris Diaw" :player{age: 36, name: "Boris Diaw"})-[:like@0 {likeness: 80}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | Then drop the used space + + Scenario: ALL PATH YIELD PATH + Given a graph with space named "nba" + When executing query: + """ + FIND ALL PATH WITH PROP FROM "Yao Ming" TO "Danny Green" OVER * BIDIRECT + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p | + YIELD startnode($-.p) as startnode + """ + Then the result should be, in any order, with relax comparison: + | startnode | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + | ("Yao Ming" :player{age: 38, name: "Yao Ming"}) | + When executing query: + 
""" + FIND ALL PATH WITH PROP FROM "Yao Ming" TO "Danny Green" OVER * BIDIRECT + WHERE like.likeness is EMPTY OR like.likeness >= 80 UPTO 3 STEPS YIELD path as p | + YIELD endnode($-.p) as endnode + """ + Then the result should be, in any order, with relax comparison: + | endnode | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + | ("Danny Green" :player{age: 31, name: "Danny Green"}) | + When executing query: + """ + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p | + YIELD length($-.p) as length + """ + Then the result should be, in any order, with relax comparison: + | length | + | 1 | + | 3 | + | 3 | + When executing query: + """ + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}]] | + | [[:like "Tim Duncan"->"Manu Ginobili" @0 {likeness: 95}], [:like "Manu Ginobili"->"Tim Duncan" @0 {likeness: 90}], [:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}]] | + | [[:like "Tim Duncan"->"Tony Parker" @0 {likeness: 95}], [:like "Tony Parker"->"LaMarcus Aldridge" @0 {likeness: 90}], [:like "LaMarcus Aldridge"->"Tony Parker" @0 {likeness: 75}]] | + When executing query: + """ + FIND ALL PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | + | [("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Manu Ginobili" :player{age: 41, name: "Manu 
Ginobili"}), ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | + | [("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"}), ("Tony Parker" :player{age: 36, name: "Tony Parker"})] | diff --git a/tests/tck/features/path/NoLoop.IntVid.feature b/tests/tck/features/path/NoLoop.IntVid.feature index 024e7a0088d..eaa83b5d8b7 100644 --- a/tests/tck/features/path/NoLoop.IntVid.feature +++ b/tests/tck/features/path/NoLoop.IntVid.feature @@ -9,19 +9,19 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [1] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [2] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Manu Ginobili") OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Manu Ginobili") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -29,20 +29,20 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [3] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS 
YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: Integer Vid [4] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | @@ -54,10 +54,10 @@ Feature: Integer Vid NoLoop Path When executing query: """ GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst - | FIND NOLOOP PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS + | FIND NOLOOP PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -67,10 +67,10 @@ Feature: Integer Vid NoLoop Path When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -79,11 +79,11 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [1] NOLOOP Path With Limit When 
executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 3 + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | < ("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:serve]->("Spurs")> | @@ -92,11 +92,11 @@ Feature: Integer Vid NoLoop Path When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -105,18 +105,18 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [1] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: Integer Vid [2] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY 
UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Manu Ginobili")<-[:like]-("Tony Parker")> | @@ -124,10 +124,10 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [3] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")> | @@ -138,10 +138,10 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid [2] NOLOOP Path BIDIRECT When executing query: """ - FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 3 STEPS + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Marco Belinelli")-[:like]->("Tony Parker")> | @@ -164,41 +164,81 @@ Feature: Integer Vid NoLoop Path Scenario: Integer Vid NOLOOP Path WITH PROP When executing query: """ - FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim 
Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})> | Scenario: Integer Vid NOLOOP Path WITH FILTER When executing query: """ - FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT WHERE like.likeness > 95 UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like BIDIRECT WHERE like.likeness > 95 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 99}]-("Dejounte Murray" :player{age: 29, name: "Dejounte Murray"})-[:like@0 {likeness: 99}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like, serve WHERE serve.start_year > 1990 OR like.likeness is EMPTY UPTO 3 STEPS + FIND NOLOOP PATH WITH 
PROP FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like, serve WHERE serve.start_year > 1990 OR like.likeness is EMPTY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:serve@0 {end_year: 2016, start_year: 1997}]->("Spurs" :team{name: "Spurs"})> | When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH WITH PROP FROM $a.src TO $a.dst OVER like WHERE like.likeness > 90 UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND NOLOOP PATH WITH PROP FROM $a.src TO $a.dst OVER like WHERE like.likeness > 90 UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})> | + + Scenario: Integer Vid NOLOOP Path YIELD PATH + When executing query: + """ + FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like UPTO 3 STEPS YIELD path as p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | + When executing query: + """ + FIND NOLOOP PATH WITH PROP FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS YIELD path as p | + YIELD startnode($-.p) as startnode + """ + Then the result should be, in any order, with relax comparison: + | startnode | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | 
("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + When executing query: + """ + $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Manu Ginobili"), ("Tim Duncan")] | + | [("Tony Parker"), ("Tim Duncan")] | + | [("Tony Parker"), ("Manu Ginobili"), ("Tim Duncan")] | + | [("Tony Parker"), ("LaMarcus Aldridge"), ("Tim Duncan")] | + When executing query: + """ + FIND NOLOOP PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY UPTO 3 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tony Parker"->"Tim Duncan" @0 {}]] | + | [[:like "LaMarcus Aldridge"->"Tim Duncan" @0 {}], [:like "Tony Parker"->"LaMarcus Aldridge" @0 {}]] | + | [[:like "Manu Ginobili"->"Tim Duncan" @0 {}], [:like "Tony Parker"->"Manu Ginobili" @0 {}]] | diff --git a/tests/tck/features/path/NoLoop.feature b/tests/tck/features/path/NoLoop.feature index 3a2cc9e3b6a..cba4baeafbe 100644 --- a/tests/tck/features/path/NoLoop.feature +++ b/tests/tck/features/path/NoLoop.feature @@ -9,19 +9,19 @@ Feature: NoLoop Path Scenario: [1] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [2] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker", "Manu Ginobili" OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker", "Manu Ginobili" OVER 
like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("Manu Ginobili")> | @@ -29,20 +29,20 @@ Feature: NoLoop Path Scenario: [3] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: [4] NOLOOP Path When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | @@ -54,10 +54,10 @@ Feature: NoLoop Path When executing query: """ GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst - | FIND NOLOOP PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS + | FIND NOLOOP PATH FROM $-.src TO $-.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -67,10 +67,10 @@ Feature: NoLoop Path When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH FROM 
$a.src TO $a.dst OVER like UPTO 3 STEPS + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -79,11 +79,11 @@ Feature: NoLoop Path Scenario: [1] NOLOOP Path With Limit When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 3 + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Manu Ginobili")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | @@ -92,11 +92,11 @@ Feature: NoLoop Path When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("LaMarcus Aldridge")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Manu Ginobili")-[:like]->("Tim Duncan")> | @@ -105,18 +105,18 @@ Feature: NoLoop Path Scenario: [1] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | 
Scenario: [2] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Manu Ginobili")<-[:like]-("Tony Parker")> | @@ -124,10 +124,10 @@ Feature: NoLoop Path Scenario: [3] NOLOOP Path REVERSELY When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like REVERSELY UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | | <("Tim Duncan")<-[:like]-("Tony Parker")<-[:like]-("LaMarcus Aldridge")> | @@ -138,10 +138,10 @@ Feature: NoLoop Path Scenario: [2] NOLOOP Path BIDIRECT When executing query: """ - FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT UPTO 3 STEPS + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")<-[:like]-("Marco Belinelli")-[:like]->("Tony Parker")> | @@ -164,41 +164,81 @@ Feature: NoLoop Path Scenario: NOLOOP Path WITH PROP When executing query: """ - FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, 
in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})> | Scenario: NOLOOP Path WITH FILTER When executing query: """ - FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT WHERE like.likeness > 95 UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like BIDIRECT WHERE like.likeness > 95 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 99}]-("Dejounte Murray" :player{age: 29, name: "Dejounte Murray"})-[:like@0 {likeness: 99}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker", "Spurs" OVER like, serve WHERE serve.start_year > 1990 OR like.likeness is EMPTY UPTO 3 STEPS + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO 
"Tony Parker", "Spurs" OVER like, serve WHERE serve.start_year > 1990 OR like.likeness is EMPTY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:serve@0 {end_year: 2016, start_year: 1997}]->("Spurs" :team{name: "Spurs"})> | When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND NOLOOP PATH WITH PROP FROM $a.src TO $a.dst OVER like WHERE like.likeness > 90 UPTO 3 STEPS - | ORDER BY $-.path | LIMIT 5 + FIND NOLOOP PATH WITH PROP FROM $a.src TO $a.dst OVER like WHERE like.likeness > 90 UPTO 3 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 5 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})> | + + Scenario: NOLOOP Path YIELD PATH + When executing query: + """ + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker" OVER like UPTO 3 STEPS YIELD path as p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | + When executing query: + """ + FIND NOLOOP PATH WITH PROP FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS YIELD path as p | + YIELD startnode($-.p) as startnode + """ + Then the result should be, in any order, with relax comparison: + | startnode | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"}) | + | ("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: 
"Tim Duncan"}) | + When executing query: + """ + $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; + FIND NOLOOP PATH FROM $a.src TO $a.dst OVER like UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Manu Ginobili"), ("Tim Duncan")] | + | [("Tony Parker"), ("Tim Duncan")] | + | [("Tony Parker"), ("Manu Ginobili"), ("Tim Duncan")] | + | [("Tony Parker"), ("LaMarcus Aldridge"), ("Tim Duncan")] | + When executing query: + """ + FIND NOLOOP PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY UPTO 3 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tony Parker"->"Tim Duncan" @0 {}]] | + | [[:like "LaMarcus Aldridge"->"Tim Duncan" @0 {}], [:like "Tony Parker"->"LaMarcus Aldridge" @0 {}]] | + | [[:like "Manu Ginobili"->"Tim Duncan" @0 {}], [:like "Tony Parker"->"Manu Ginobili" @0 {}]] | diff --git a/tests/tck/features/path/ShortestPath.IntVid.feature b/tests/tck/features/path/ShortestPath.IntVid.feature index 47f71eaef0d..fbce67790c1 100644 --- a/tests/tck/features/path/ShortestPath.IntVid.feature +++ b/tests/tck/features/path/ShortestPath.IntVid.feature @@ -9,80 +9,80 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [1] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [2] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("LaMarcus Aldridge") OVER like + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("LaMarcus Aldridge") OVER 
like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: Integer Vid [3] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER like + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: Integer Vid [4] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER like, teammate + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER like, teammate YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")> | Scenario: Integer Vid [5] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER * + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("LaMarcus Aldridge") OVER * YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")> | Scenario: Integer Vid [6] SinglePair Shortest Path limit steps When executing query: """ - FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("Tony Parker") OVER * UPTO 1 STEPS + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("Tony Parker") OVER * UPTO 1 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | When executing query: """ - FIND SHORTEST PATH FROM hash("Tiago Splitter") TO 
hash("Tim Duncan") OVER * UPTO 1 STEPS + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("Tim Duncan") OVER * UPTO 1 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")> | Scenario: Integer Vid [1] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | Scenario: Integer Vid [2] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:teammate]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | @@ -90,10 +90,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [3] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu 
Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | @@ -107,10 +107,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [4] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | @@ -120,10 +120,10 @@ Feature: Integer Vid Shortest Path | <("Tony Parker")-[:serve]->("Spurs")> | When executing query: """ - FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tim Duncan"), hash("Spurs"), hash("Lakers") OVER * UPTO 2 STEPS + FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tim Duncan"), hash("Spurs"), hash("Lakers") OVER * UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | @@ -131,10 +131,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [5] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Marco Belinelli"), hash("Yao Ming") TO hash("Spurs"), hash("Lakers") OVER * UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Marco Belinelli"), hash("Yao Ming") TO hash("Spurs"), hash("Lakers") OVER * UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | 
path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | @@ -144,20 +144,20 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [6] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("LaMarcus Aldridge") OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: Integer Vid [7] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan"), hash("Tiago Splitter") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan"), hash("Tiago Splitter") TO hash("Tony Parker"), hash("Spurs") OVER like,serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tiago Splitter")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | @@ -166,20 +166,20 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [8] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tony Parker"), hash("Tracy McGrady") OVER like,serve UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Yao Ming") TO hash("Tony Parker"), hash("Tracy McGrady") OVER like,serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim 
Duncan")-[:like]->("Tony Parker")> | | <("Yao Ming")-[:like]->("Tracy McGrady")> | Scenario: Integer Vid [9] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -188,10 +188,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [10] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -200,19 +200,19 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [11] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu 
Ginobili")> | Scenario: Integer Vid [12] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM hash("Marco Belinelli") TO hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM hash("Marco Belinelli") TO hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Marco Belinelli")-[:serve]->("Spurs")> | | <("Marco Belinelli")-[:serve@1]->("Spurs")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | @@ -220,29 +220,29 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [1] MultiPair Shortest Path Empty Path When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like,serve UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: Integer Vid [1] MultiPair Shortest Path Run Time input When executing query: """ YIELD hash("Yao Ming") AS src, hash("Tony Parker") AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: Integer Vid [2] MultiPair Shortest Path Run Time input When executing query: """ YIELD hash("Shaquille O\'Neal") AS src - | FIND SHORTEST PATH FROM $-.src TO hash("Manu Ginobili") OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO hash("Manu Ginobili") OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim 
Duncan")-[:like]->("Manu Ginobili")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | @@ -250,10 +250,10 @@ Feature: Integer Vid Shortest Path When executing query: """ YIELD hash("Manu Ginobili") AS dst - | FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO $-.dst OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM hash("Shaquille O\'Neal") TO $-.dst OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | @@ -261,10 +261,10 @@ Feature: Integer Vid Shortest Path When executing query: """ GO FROM hash("Yao Ming") over like YIELD like._dst AS src - | FIND SHORTEST PATH FROM $-.src TO hash("Tony Parker") OVER like, serve UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO hash("Tony Parker") OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | @@ -272,10 +272,10 @@ Feature: Integer Vid Shortest Path When executing query: """ $a = GO FROM hash("Yao Ming") over like YIELD like._dst AS src; - FIND SHORTEST PATH FROM $a.src TO hash("Tony Parker") OVER like, serve UPTO 5 STEPS + FIND SHORTEST PATH FROM $a.src TO hash("Tony Parker") OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | @@ -283,10 +283,10 @@ Feature: Integer Vid Shortest Path When executing query: """ GO FROM hash("Tim Duncan") over * YIELD 
like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | @@ -294,10 +294,10 @@ Feature: Integer Vid Shortest Path When executing query: """ $a = GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst; - FIND SHORTEST PATH FROM $a.src TO $a.dst OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM $a.src TO $a.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | @@ -306,114 +306,114 @@ Feature: Integer Vid Shortest Path """ $a = GO FROM hash("Tim Duncan") over like YIELD like._src AS src; GO FROM hash("Tony Parker") OVER like YIELD like._src AS src, like._dst AS dst - | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS + | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: Integer Vid [1] Shortest Path With Limit When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like,serve UPTO 3 STEPS | ORDER BY $-.path | LIMIT 3 + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like,serve UPTO 3 STEPS YIELD path as p | ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: Integer Vid [2] Shortest Path With Limit When executing query: """ - FIND SHORTEST PATH FROM 
hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 2 + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 2 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: Integer Vid [3] Shortest Path With Limit When executing query: """ - GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 1 + GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst | + FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 1 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker")-[:like@0]->("Tim Duncan")> | Scenario: Integer Vid [4] Shortest Path With Limit When executing query: """ - GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 10 + GO FROM hash("Tim Duncan") over * YIELD like._dst AS src, serve._src AS dst | + FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 10 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | Scenario: Integer Vid [1] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO 
hash("Nobody"), hash("Spur") OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: Integer Vid [2] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker") OVER like REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | Scenario: Integer Vid [3] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("LaMarcus Aldridge") OVER like REVERSELY + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("LaMarcus Aldridge") OVER like REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | Scenario: Integer Vid [4] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve REVERSELY UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Tony Parker"), hash("Spurs") OVER like,serve REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | Scenario: Integer Vid [5] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY + FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker")<-[:teammate]-("Manu Ginobili")> | Scenario: Integer Vid [1] Shortest Path 
BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"),hash("Spur") OVER like BIDIRECT UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tim Duncan") TO hash("Nobody"),hash("Spur") OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order: - | path | + | p | Scenario: Integer Vid [2] Shortest Path BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS + FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Tony Parker")-[:serve]->("Spurs")> | @@ -424,10 +424,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid [3] Shortest Path BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 3 STEPS + FIND SHORTEST PATH FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -450,24 +450,24 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid Shortest Path With PROP When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tim Duncan") TO 
hash("LaMarcus Aldridge") OVER like + FIND SHORTEST PATH WITH PROP FROM hash("Tim Duncan") TO hash("LaMarcus Aldridge") OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal": player{age: 47, name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004,start_year: 1996}]->("Lakers": team{name: "Lakers"})> | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39, name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | | 
<("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | @@ -478,10 +478,10 @@ Feature: Integer Vid Shortest Path Scenario: Integer Vid Shortest Path With Filter When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | @@ -489,26 +489,75 @@ Feature: Integer Vid Shortest Path | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY WHERE like.likeness > 70 + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * REVERSELY WHERE like.likeness > 70 YIELD path as p """ Then the 
result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 90}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ $a = GO FROM hash("Yao Ming") over like YIELD like._dst AS src; - FIND SHORTEST PATH WITH PROP FROM $a.src TO hash("Tony Parker") OVER like, serve WHERE serve.start_year is EMPTY UPTO 5 STEPS + FIND SHORTEST PATH WITH PROP FROM $a.src TO hash("Tony Parker") OVER like, serve WHERE serve.start_year is EMPTY UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:like@0 {likeness: 90}]->("Rudy Gay" :player{age: 32, name: "Rudy Gay"})-[:like@0 {likeness: 70}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, 
name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + + Scenario: Integer Vid Shortest Path YIELD path + When executing query: + """ + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + When executing query: + """ + FIND SHORTEST PATH FROM hash("Shaquille O\'Neal"), hash("Nobody") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * UPTO 5 STEPS YIELD path as p | + YIELD length($-.p) as length + """ + Then the result should be, in any order, with relax comparison: + | length | + | 2 | + | 1 | + | 2 | + | 2 | + When executing query: + """ + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: 
"Manu Ginobili"})] | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + When executing query: + """ + FIND SHORTEST PATH WITH PROP FROM hash("Tony Parker"), hash("Yao Ming") TO hash("Manu Ginobili"), hash("Spurs"), hash("Lakers") OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p | + YIELD distinct nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + When executing query: + """ + FIND SHORTEST PATH FROM hash("Tiago Splitter") TO hash("Tim Duncan") OVER * UPTO 1 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tiago Splitter"->"Tim Duncan" @0 {}]] | diff --git a/tests/tck/features/path/ShortestPath.feature b/tests/tck/features/path/ShortestPath.feature index 55587e0d76a..c4af624f489 100644 --- a/tests/tck/features/path/ShortestPath.feature +++ b/tests/tck/features/path/ShortestPath.feature @@ -9,80 +9,80 @@ Feature: Shortest Path Scenario: [1] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker" OVER like + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker" OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [2] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like + FIND SHORTEST PATH FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus 
Aldridge")> | Scenario: [3] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER like + FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER like YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: [4] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER like, teammate + FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER like, teammate YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")> | Scenario: [5] SinglePair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER * + FIND SHORTEST PATH FROM "Tiago Splitter" TO "LaMarcus Aldridge" OVER * YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:teammate]->("LaMarcus Aldridge")> | Scenario: [6] SinglePair Shortest Path limit steps When executing query: """ - FIND SHORTEST PATH FROM "Tiago Splitter" TO "Tony Parker" OVER * UPTO 1 STEPS + FIND SHORTEST PATH FROM "Tiago Splitter" TO "Tony Parker" OVER * UPTO 1 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | When executing query: """ - FIND SHORTEST PATH FROM "Tiago Splitter" TO "Tim Duncan" OVER * UPTO 1 STEPS + FIND SHORTEST PATH FROM "Tiago Splitter" TO "Tim Duncan" OVER * UPTO 1 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")> | Scenario: [1] MultiPair Shortest Path When executing query: 
""" - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | Scenario: [2] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:teammate]->("Tony Parker")> | | <("Tim Duncan")-[:serve]->("Spurs")> | @@ -90,10 +90,10 @@ Feature: Shortest Path Scenario: [3] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | @@ -107,10 +107,10 @@ Feature: Shortest Path Scenario: [4] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao 
Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | @@ -120,10 +120,10 @@ Feature: Shortest Path | <("Tony Parker")-[:serve]->("Spurs")> | When executing query: """ - FIND SHORTEST PATH FROM "Yao Ming" TO "Tim Duncan", "Spurs", "Lakers" OVER * UPTO 2 STEPS + FIND SHORTEST PATH FROM "Yao Ming" TO "Tim Duncan", "Spurs", "Lakers" OVER * UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")> | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | @@ -131,10 +131,10 @@ Feature: Shortest Path Scenario: [5] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Marco Belinelli", "Yao Ming" TO "Spurs", "Lakers" OVER * UPTO 3 STEPS + FIND SHORTEST PATH FROM "Marco Belinelli", "Yao Ming" TO "Spurs", "Lakers" OVER * UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | @@ -144,20 +144,20 @@ Feature: Shortest Path Scenario: [6] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","LaMarcus Aldridge" OVER like UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tim Duncan")-[:like]->("Tony 
Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: [7] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan", "Tiago Splitter" TO "Tony Parker","Spurs" OVER like,serve UPTO 5 STEPS + FIND SHORTEST PATH FROM "Tim Duncan", "Tiago Splitter" TO "Tony Parker","Spurs" OVER like,serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tiago Splitter")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Tiago Splitter")-[:serve]->("Spurs")> | | <("Tim Duncan")-[:like]->("Tony Parker")> | @@ -166,20 +166,20 @@ Feature: Shortest Path Scenario: [8] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Yao Ming" TO "Tony Parker","Tracy McGrady" OVER like,serve UPTO 5 STEPS + FIND SHORTEST PATH FROM "Yao Ming" TO "Tony Parker","Tracy McGrady" OVER like,serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | | <("Yao Ming")-[:like]->("Tracy McGrady")> | Scenario: [9] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -188,10 +188,10 @@ Feature: Shortest Path Scenario: [10] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal", 
"Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -200,19 +200,19 @@ Feature: Shortest Path Scenario: [11] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO "Manu Ginobili", "Spurs", "Lakers" OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: [12] MultiPair Shortest Path When executing query: """ - FIND SHORTEST PATH FROM "Marco Belinelli" TO "Spurs", "Lakers" OVER * UPTO 5 STEPS + FIND SHORTEST PATH FROM "Marco Belinelli" TO "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Marco Belinelli")-[:serve]->("Spurs")> | | <("Marco Belinelli")-[:serve@1]->("Spurs")> | | <("Marco Belinelli")-[:like]->("Danny Green")-[:like]->("LeBron James")-[:serve]->("Lakers")> | @@ -220,29 +220,29 @@ Feature: Shortest Path Scenario: [1] MultiPair Shortest Path Empty Path When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like,serve UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like,serve UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: [1] MultiPair Shortest Path Run Time input When executing query: """ YIELD "Yao Ming" AS src, "Tony Parker" AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS 
+ | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | Scenario: [2] MultiPair Shortest Path Run Time input When executing query: """ YIELD "Shaquille O\'Neal" AS src - | FIND SHORTEST PATH FROM $-.src TO "Manu Ginobili" OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO "Manu Ginobili" OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | @@ -250,10 +250,10 @@ Feature: Shortest Path When executing query: """ YIELD "Manu Ginobili" AS dst - | FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO $-.dst OVER * UPTO 5 STEPS + | FIND SHORTEST PATH FROM "Shaquille O\'Neal" TO $-.dst OVER * UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | @@ -261,10 +261,10 @@ Feature: Shortest Path When executing query: """ GO FROM "Yao Ming" over like YIELD like._dst AS src - | FIND SHORTEST PATH FROM $-.src TO "Tony Parker" OVER like, serve UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO "Tony Parker" OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | @@ -272,10 +272,10 @@ Feature: Shortest Path When executing query: """ $a = GO FROM "Yao Ming" over like YIELD 
like._dst AS src; - FIND SHORTEST PATH FROM $a.src TO "Tony Parker" OVER like, serve UPTO 5 STEPS + FIND SHORTEST PATH FROM $a.src TO "Tony Parker" OVER like, serve UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady")-[:like]->("Rudy Gay")-[:like]->("LaMarcus Aldridge")-[:like]->("Tony Parker")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Tony Parker")> | @@ -283,10 +283,10 @@ Feature: Shortest Path When executing query: """ GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS + | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | @@ -294,10 +294,10 @@ Feature: Shortest Path When executing query: """ $a = GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst; - FIND SHORTEST PATH FROM $a.src TO $a.dst OVER like UPTO 5 STEPS + FIND SHORTEST PATH FROM $a.src TO $a.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | @@ -306,114 +306,114 @@ Feature: Shortest Path """ $a = GO FROM "Tim Duncan" over like YIELD like._src AS src; GO FROM "Tony Parker" OVER like YIELD like._src AS src, like._dst AS dst - | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS + | FIND SHORTEST PATH FROM $a.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")-[:like]->("Manu Ginobili")> | | <("Tim Duncan")-[:like]->("Tony Parker")-[:like]->("LaMarcus Aldridge")> | Scenario: [1] Shortest Path With Limit 
When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like,serve UPTO 3 STEPS | ORDER BY $-.path | LIMIT 3 + FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like,serve UPTO 3 STEPS YIELD path as p | ORDER BY $-.p | LIMIT 3 """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: [2] Shortest Path With Limit When executing query: """ - FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 2 + FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 2 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | Scenario: [3] Shortest Path With Limit When executing query: """ - GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 1 + GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst | + FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 1 """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | Scenario: [4] Shortest Path With Limit When executing query: """ - GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst - | FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS - | ORDER BY $-.path | LIMIT 10 + GO FROM "Tim Duncan" over * YIELD like._dst AS src, serve._src AS dst | + FIND SHORTEST PATH FROM $-.src TO $-.dst OVER like UPTO 5 STEPS YIELD path as p | + ORDER BY $-.p | LIMIT 10 """ Then the result should be, in any order, with relax comparison: - | path | + | 
p | | <("Manu Ginobili")-[:like]->("Tim Duncan")> | | <("Tony Parker")-[:like]->("Tim Duncan")> | Scenario: [1] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | Scenario: [2] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker" OVER like REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | Scenario: [3] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like REVERSELY + FIND SHORTEST PATH FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("LaMarcus Aldridge")> | Scenario: [4] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve REVERSELY UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Tony Parker","Spurs" OVER like,serve REVERSELY UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan")<-[:like]-("Tony Parker")> | Scenario: [5] Shortest Path REVERSELY When executing query: """ - FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY + FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | 
<("Tony Parker")<-[:teammate]-("Manu Ginobili")> | Scenario: [1] Shortest Path BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like BIDIRECT UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tim Duncan" TO "Nobody","Spur" OVER like BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order: - | path | + | p | Scenario: [2] Shortest Path BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS + FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:serve]->("Lakers")> | | <("Tony Parker")-[:serve]->("Spurs")> | @@ -424,10 +424,10 @@ Feature: Shortest Path Scenario: [3] Shortest Path BIDIRECT When executing query: """ - FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 3 STEPS + FIND SHORTEST PATH FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming")-[:like]->("Tracy McGrady")-[:serve]->("Spurs")<-[:serve]-("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")<-[:like]-("Manu Ginobili")> | | <("Yao Ming")-[:like]->("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | @@ -450,24 +450,24 @@ Feature: Shortest Path Scenario: Shortest Path With PROP When executing query: """ - FIND SHORTEST PATH WITH PROP FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like + FIND SHORTEST PATH WITH PROP FROM "Tim Duncan" TO "LaMarcus Aldridge" OVER like YIELD path as p """ Then the 
result should be, in any order, with relax comparison: - | path | + | p | | <("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 90}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | @@ -478,10 +478,10 @@ Feature: Shortest Path Scenario: Shortest Path With Filter When executing query: """ - FIND SHORTEST PATH WITH PROP FROM 
"Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE like.likeness == 90 OR like.likeness is empty UPTO 2 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Shaquille O'Neal" :player {age: 47,name: "Shaquille O'Neal"})-[:serve@0 {end_year: 2004, start_year: 1996}]->("Lakers": team{name: "Lakers"})> | | <("Yao Ming" : player{age: 38, name: "Yao Ming"})-[:like@0 {likeness: 90}]->("Tracy McGrady": player{age: 39,name: "Tracy McGrady"})-[:serve@0 {end_year: 2013, start_year: 2013}]->("Spurs": team{name: "Spurs"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:serve@0 {end_year: 2018, start_year: 1999}]->("Spurs" :team{name: "Spurs"})> | @@ -489,26 +489,75 @@ Feature: Shortest Path | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY WHERE like.likeness > 70 + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * REVERSELY WHERE like.likeness > 70 YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:like@0 {likeness: 95}]-("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})<-[:like@0 {likeness: 90}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | When executing query: """ $a = GO FROM "Yao Ming" over like YIELD like._dst AS src; - 
FIND SHORTEST PATH WITH PROP FROM $a.src TO "Tony Parker" OVER like, serve WHERE serve.start_year is EMPTY UPTO 5 STEPS + FIND SHORTEST PATH WITH PROP FROM $a.src TO "Tony Parker" OVER like, serve WHERE serve.start_year is EMPTY UPTO 5 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tracy McGrady" :player{age: 39, name: "Tracy McGrady"})-[:like@0 {likeness: 90}]->("Rudy Gay" :player{age: 32, name: "Rudy Gay"})-[:like@0 {likeness: 70}]->("LaMarcus Aldridge" :player{age: 33, name: "LaMarcus Aldridge"})-[:like@0 {likeness: 75}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | | <("Shaquille O'Neal" :player{age: 47, name: "Shaquille O'Neal"})-[:like@0 {likeness: 80}]->("Tim Duncan" :bachelor{name: "Tim Duncan", speciality: "psychology"} :player{age: 42, name: "Tim Duncan"})-[:like@0 {likeness: 95}]->("Tony Parker" :player{age: 36, name: "Tony Parker"})> | When executing query: """ - FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p """ Then the result should be, in any order, with relax comparison: - | path | + | p | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})<-[:teammate@0 {end_year: 2016, start_year: 2002}]-("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:like@0 {likeness: 95}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | | <("Tony Parker" :player{age: 36, name: "Tony Parker"})-[:teammate@0 {end_year: 2018, start_year: 2002}]->("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})> | + + Scenario: Shortest Path YIELD path + When executing query: + """ + 
FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p + """ + Then the result should be, in any order, with relax comparison: + | p | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:serve]->("Spurs")> | + | <("Shaquille O'Neal")-[:serve]->("Lakers")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:like]->("Manu Ginobili")> | + | <("Shaquille O'Neal")-[:like]->("Tim Duncan")-[:teammate]->("Manu Ginobili")> | + When executing query: + """ + FIND SHORTEST PATH FROM "Shaquille O\'Neal", "Nobody" TO "Manu Ginobili", "Spurs", "Lakers" OVER * UPTO 5 STEPS YIELD path as p | + YIELD length($-.p) as length + """ + Then the result should be, in any order, with relax comparison: + | length | + | 2 | + | 1 | + | 2 | + | 2 | + When executing query: + """ + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p | + YIELD nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | + When executing query: + """ + FIND SHORTEST PATH WITH PROP FROM "Tony Parker", "Yao Ming" TO "Manu Ginobili", "Spurs", "Lakers" OVER * BIDIRECT WHERE teammate.start_year is not EMPTY OR like.likeness > 90 UPTO 3 STEPS YIELD path as p | + YIELD distinct nodes($-.p) as nodes + """ + Then the result should be, in any order, with relax comparison: + | nodes | + | [("Tony Parker" :player{age: 36, name: "Tony Parker"}), ("Manu Ginobili" :player{age: 41, name: "Manu Ginobili"})] | 
+ When executing query: + """ + FIND SHORTEST PATH FROM "Tiago Splitter" TO "Tim Duncan" OVER * UPTO 1 STEPS YIELD path as p | + YIELD relationships($-.p) as relationships + """ + Then the result should be, in any order, with relax comparison: + | relationships | + | [[:like "Tiago Splitter"->"Tim Duncan" @0 {}]] | From 46741d11664bc3bb3d5f24c2450bccf379548eba Mon Sep 17 00:00:00 2001 From: "kyle.cao" Date: Tue, 23 Nov 2021 13:56:58 +0800 Subject: [PATCH 35/53] fix previous merge conflict (#3342) --- .../executor/admin/BalanceLeadersExecutor.h | 29 - src/meta/processors/admin/Balancer.cpp | 1232 ----------------- src/meta/processors/admin/Balancer.h | 269 ---- 3 files changed, 1530 deletions(-) delete mode 100644 src/graph/executor/admin/BalanceLeadersExecutor.h delete mode 100644 src/meta/processors/admin/Balancer.cpp delete mode 100644 src/meta/processors/admin/Balancer.h diff --git a/src/graph/executor/admin/BalanceLeadersExecutor.h b/src/graph/executor/admin/BalanceLeadersExecutor.h deleted file mode 100644 index 604d49bc444..00000000000 --- a/src/graph/executor/admin/BalanceLeadersExecutor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ - -#include "graph/context/QueryContext.h" -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class BalanceLeadersExecutor final : public Executor { - public: - BalanceLeadersExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("BalanceLeadersExecutor", node, qctx) {} - - folly::Future execute() override; - - private: - folly::Future balanceLeaders(); -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_BALANCELEADERSEXECUTOR_H_ diff --git a/src/meta/processors/admin/Balancer.cpp b/src/meta/processors/admin/Balancer.cpp deleted file mode 100644 index 8cdad02b45a..00000000000 --- a/src/meta/processors/admin/Balancer.cpp +++ /dev/null @@ -1,1232 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "meta/processors/admin/Balancer.h" - -#include - -#include -#include - -#include "common/network/NetworkUtils.h" -#include "common/utils/MetaKeyUtils.h" -#include "kvstore/NebulaStore.h" -#include "meta/ActiveHostsMan.h" -#include "meta/common/MetaCommon.h" -#include "meta/processors/Common.h" - -DEFINE_double(leader_balance_deviation, - 0.05, - "after leader balance, leader count should in range " - "[avg * (1 - deviation), avg * (1 + deviation)]"); - -namespace nebula { -namespace meta { - -ErrorOr Balancer::balance(std::vector&& lostHosts) { - std::lock_guard lg(lock_); - if (!running_) { - auto retCode = recovery(); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Recovery balancer failed!"; - finish(); - return retCode; - } - if (plan_ == nullptr) { - LOG(INFO) << "There is no corrupted plan need to recovery, so create a new one"; - retCode = buildBalancePlan(std::move(lostHosts)); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Create balance 
plan failed"; - finish(); - return retCode; - } - } - LOG(INFO) << "Start to invoke balance plan " << plan_->id(); - executor_->add(std::bind(&BalancePlan::invoke, plan_.get())); - running_ = true; - return plan_->id(); - } - CHECK(!!plan_); - LOG(INFO) << "Balance plan " << plan_->id() << " is still running"; - return plan_->id(); -} - -ErrorOr Balancer::show(BalanceID id) const { - std::lock_guard lg(lock_); - if (plan_ != nullptr && plan_->id() == id) { - return *plan_; - } - - if (kv_) { - BalancePlan plan(id, kv_, client_); - auto retCode = plan.recovery(false); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get balance plan failed, id " << id; - return retCode; - } - return plan; - } - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; -} - -ErrorOr Balancer::stop() { - std::lock_guard lg(lock_); - if (!running_) { - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } - CHECK(!!plan_); - plan_->stop(); - LOG(INFO) << "Stop balance plan " << plan_->id(); - return plan_->id(); -} - -ErrorOr Balancer::cleanLastInValidPlan() { - std::lock_guard lg(lock_); - auto* store = static_cast(kv_); - if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { - return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; - } - if (running_) { - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; - } - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - // There should be at most one invalid plan, and it must be the latest one - if (iter->valid()) { - auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); - if (status == BalanceStatus::FAILED) { - auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); - folly::Baton baton; - auto result = nebula::cpp2::ErrorCode::SUCCEEDED; - // 
Only remove the plan will be enough - kv_->asyncMultiRemove(kDefaultSpaceId, - kDefaultPartId, - {iter->key().str()}, - [&baton, &result](nebula::cpp2::ErrorCode code) { - result = code; - baton.post(); - }); - baton.wait(); - if (result != nebula::cpp2::ErrorCode::SUCCEEDED) { - return result; - } - return balanceId; - } - } - return nebula::cpp2::ErrorCode::E_NO_INVALID_BALANCE_PLAN; -} - -nebula::cpp2::ErrorCode Balancer::recovery() { - CHECK(!plan_) << "plan should be nullptr now"; - if (kv_) { - auto* store = static_cast(kv_); - if (!store->isLeader(kDefaultSpaceId, kDefaultPartId)) { - // We need to check whether is leader or not, otherwise we would failed to - // persist state of BalancePlan and BalanceTask, so we just reject request - // if not leader. - return nebula::cpp2::ErrorCode::E_LEADER_CHANGED; - } - const auto& prefix = MetaKeyUtils::balancePlanPrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't access kvstore, ret = " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - std::vector corruptedPlans; - // The balance plan is stored with balance id desc order, there should be at - // most one failed or in_progress plan, and it must be the latest one - if (iter->valid()) { - auto status = MetaKeyUtils::parseBalanceStatus(iter->val()); - if (status == BalanceStatus::IN_PROGRESS || status == BalanceStatus::FAILED) { - auto balanceId = MetaKeyUtils::parseBalanceID(iter->key()); - corruptedPlans.emplace_back(balanceId); - } - } - if (corruptedPlans.empty()) { - LOG(INFO) << "No corrupted plan need to recovery!"; - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - - CHECK_EQ(1, corruptedPlans.size()); - plan_ = std::make_unique(corruptedPlans[0], kv_, client_); - plan_->onFinished_ = [this]() { - auto self = plan_; - { - std::lock_guard lg(lock_); - if (LastUpdateTimeMan::update(kv_, 
time::WallClock::fastNowInMilliSec()) != - nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; - } - finish(); - } - }; - auto recRet = plan_->recovery(); - if (recRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't recovery plan " << corruptedPlans[0]; - return recRet; - } - } - // save the balance plan again because FAILED tasks would be marked as - // IN_PROGRESS again - return plan_->saveInStore(); -} - -nebula::cpp2::ErrorCode Balancer::getAllSpaces( - std::vector>& spaces) { - // Get all spaces - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::spacePrefix(); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get all spaces failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - while (iter->valid()) { - auto spaceId = MetaKeyUtils::spaceId(iter->key()); - auto properties = MetaKeyUtils::parseSpace(iter->val()); - bool zoned = properties.group_name_ref().has_value(); - spaces.emplace_back(spaceId, *properties.replica_factor_ref(), zoned); - iter->next(); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -nebula::cpp2::ErrorCode Balancer::buildBalancePlan(std::vector&& lostHosts) { - if (plan_ != nullptr) { - LOG(ERROR) << "Balance plan should be nullptr now"; - return nebula::cpp2::ErrorCode::E_BALANCED; - } - - std::vector> spaces; - auto spacesRet = getAllSpaces(spaces); - if (spacesRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't get all spaces"; - return spacesRet; - } - - plan_ = std::make_unique(time::WallClock::fastNowInSec(), kv_, client_); - for (const auto& spaceInfo : spaces) { - auto spaceId = std::get<0>(spaceInfo); - auto spaceReplica = std::get<1>(spaceInfo); - auto dependentOnGroup = std::get<2>(spaceInfo); - LOG(INFO) << "Balance Space " << 
spaceId; - auto taskRet = genTasks(spaceId, spaceReplica, dependentOnGroup, std::move(lostHosts)); - if (!ok(taskRet)) { - LOG(ERROR) << "Generate tasks on space " << std::get<0>(spaceInfo) << " failed"; - return error(taskRet); - } - - auto tasks = std::move(value(taskRet)); - for (auto& task : tasks) { - plan_->addTask(std::move(task)); - } - } - - plan_->onFinished_ = [this]() { - auto self = plan_; - { - std::lock_guard lg(lock_); - if (LastUpdateTimeMan::update(kv_, time::WallClock::fastNowInMilliSec()) != - nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Balance plan " << plan_->id() << " update meta failed"; - } - finish(); - } - }; - if (plan_->tasks_.empty()) { - return nebula::cpp2::ErrorCode::E_BALANCED; - } - return plan_->saveInStore(); -} - -ErrorOr> Balancer::genTasks( - GraphSpaceID spaceId, - int32_t spaceReplica, - bool dependentOnGroup, - std::vector&& lostHosts) { - HostParts hostParts; - int32_t totalParts = 0; - // hostParts is current part allocation map - auto result = getHostParts(spaceId, dependentOnGroup, hostParts, totalParts); - if (!nebula::ok(result)) { - return nebula::error(result); - } - - auto retVal = nebula::value(result); - if (!retVal || totalParts == 0 || hostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND; - } - - auto fetchHostPartsRet = fetchHostParts(spaceId, dependentOnGroup, hostParts, lostHosts); - if (!nebula::ok(fetchHostPartsRet)) { - LOG(ERROR) << "Fetch hosts and parts failed"; - return nebula::error(fetchHostPartsRet); - } - - auto hostPartsRet = nebula::value(fetchHostPartsRet); - auto confirmedHostParts = hostPartsRet.first; - auto activeHosts = hostPartsRet.second; - LOG(INFO) << "Now, try to balance the confirmedHostParts"; - - // We have two parts need to balance, the first one is parts on lost hosts and - // deleted hosts The seconds one is parts on unbalanced host in - // confirmedHostParts. - std::vector tasks; - // 1. 
Iterate through all hosts that would not be included in - // confirmedHostParts, - // move all parts in them to host with minimum part in confirmedHostParts - for (auto& lostHost : lostHosts) { - auto& lostParts = hostParts[lostHost]; - for (auto& partId : lostParts) { - LOG(INFO) << "Try balance part " << partId << " for lost host " << lostHost; - // check whether any peers which is alive - auto alive = checkReplica(hostParts, activeHosts, spaceReplica, partId); - if (!alive.ok()) { - LOG(ERROR) << "Check Replica failed: " << alive << " Part: " << partId; - return nebula::cpp2::ErrorCode::E_NO_VALID_HOST; - } - - auto retCode = - transferLostHost(tasks, confirmedHostParts, lostHost, spaceId, partId, dependentOnGroup); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Transfer lost host " << lostHost << " failed"; - return retCode; - } - } - } - - // 2. Make all hosts in confirmedHostParts balanced - if (balanceParts(plan_->id_, spaceId, confirmedHostParts, totalParts, tasks, dependentOnGroup)) { - return tasks; - } else { - return nebula::cpp2::ErrorCode::E_BAD_BALANCE_PLAN; - } -} - -nebula::cpp2::ErrorCode Balancer::transferLostHost(std::vector& tasks, - HostParts& confirmedHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup) { - // find a host with minimum parts which doesn't have this part - ErrorOr result; - if (dependentOnGroup) { - result = hostWithMinimalPartsForZone(source, confirmedHostParts, partId); - } else { - result = hostWithMinimalParts(confirmedHostParts, partId); - } - - if (!nebula::ok(result)) { - LOG(ERROR) << "Can't find a host which doesn't have part: " << partId; - return nebula::error(result); - } - const auto& targetHost = nebula::value(result); - confirmedHostParts[targetHost].emplace_back(partId); - tasks.emplace_back(plan_->id_, spaceId, partId, source, targetHost, kv_, client_); - zoneParts_[targetHost].second.emplace_back(partId); - auto zoneIt = - 
std::find(zoneParts_[source].second.begin(), zoneParts_[source].second.end(), partId); - if (zoneIt == zoneParts_[source].second.end()) { - LOG(ERROR) << "part not find " << partId << " at " << source; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -ErrorOr>> -Balancer::fetchHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - const HostParts& hostParts, - std::vector& lostHosts) { - ErrorOr> activeHostsRet; - if (dependentOnGroup) { - activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); - } else { - activeHostsRet = ActiveHostsMan::getActiveHosts(kv_); - } - - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } - - std::vector expand; - auto activeHosts = nebula::value(activeHostsRet); - calDiff(hostParts, activeHosts, expand, lostHosts); - // confirmedHostParts is new part allocation map after balance, it would - // include newlyAdded and exclude lostHosts - HostParts confirmedHostParts(hostParts); - for (const auto& h : expand) { - LOG(INFO) << "Found new host " << h; - confirmedHostParts.emplace(h, std::vector()); - } - for (const auto& h : lostHosts) { - LOG(INFO) << "Lost host " << h; - confirmedHostParts.erase(h); - } - return std::make_pair(confirmedHostParts, activeHosts); -} - -bool Balancer::balanceParts(BalanceID balanceId, - GraphSpaceID spaceId, - HostParts& confirmedHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnGroup) { - auto avgLoad = static_cast(totalParts) / confirmedHostParts.size(); - VLOG(3) << "The expect avg load is " << avgLoad; - int32_t minLoad = std::floor(avgLoad); - int32_t maxLoad = std::ceil(avgLoad); - VLOG(3) << "The min load is " << minLoad << " max load is " << maxLoad; - - auto sortedHosts = sortedHostsByParts(confirmedHostParts); - if (sortedHosts.empty()) { - LOG(ERROR) << "Host is empty"; - return false; - } - - auto maxPartsHost = sortedHosts.back(); - auto minPartsHost = sortedHosts.front(); - auto& sourceHost = maxPartsHost.first; - auto& 
targetHost = minPartsHost.first; - if (innerBalance_) { - LOG(INFO) << "maxPartsHost.first " << maxPartsHost.first << " minPartsHost.first " - << minPartsHost.first; - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); - } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; - } - totalPartsZone += it->second.size(); - } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; - } - - while (maxPartsHost.second > maxLoad || minPartsHost.second < minLoad) { - auto& partsFrom = confirmedHostParts[maxPartsHost.first]; - auto& partsTo = confirmedHostParts[minPartsHost.first]; - std::sort(partsFrom.begin(), partsFrom.end()); - std::sort(partsTo.begin(), partsTo.end()); - - LOG(INFO) << maxPartsHost.first << ":" << partsFrom.size() << " -> " << minPartsHost.first - << ":" << partsTo.size(); - std::vector diff; - std::set_difference(partsFrom.begin(), - partsFrom.end(), - partsTo.begin(), - partsTo.end(), - std::inserter(diff, diff.begin())); - bool noAction = true; - for (auto& partId : diff) { - LOG(INFO) << "partsFrom size " << partsFrom.size() << " partsTo size " << partsTo.size() - << " minLoad " << minLoad << " maxLoad " << maxLoad; - if (partsFrom.size() == 
partsTo.size() + 1 || - partsFrom.size() == static_cast(minLoad) || - partsTo.size() == static_cast(maxLoad)) { - VLOG(3) << "No need to move any parts from " << maxPartsHost.first << " to " - << minPartsHost.first; - break; - } - - LOG(INFO) << "[space:" << spaceId << ", part:" << partId << "] " << maxPartsHost.first << "->" - << minPartsHost.first; - auto it = std::find(partsFrom.begin(), partsFrom.end(), partId); - if (it == partsFrom.end()) { - LOG(ERROR) << "Part " << partId << " not found in partsFrom"; - return false; - } - - if (std::find(partsTo.begin(), partsTo.end(), partId) != partsTo.end()) { - LOG(ERROR) << "Part " << partId << " already existed in partsTo"; - return false; - } - - if (dependentOnGroup) { - if (!checkZoneLegal(sourceHost, targetHost)) { - LOG(INFO) << "sourceHost " << sourceHost << " targetHost " << targetHost - << " not same zone"; - - auto& parts = relatedParts_[targetHost]; - auto minIt = std::find(parts.begin(), parts.end(), partId); - if (minIt != parts.end()) { - LOG(INFO) << "Part " << partId << " have existed"; - continue; - } - } - - auto& sourceNoneName = zoneParts_[sourceHost].first; - auto sourceHosts = zoneHosts_.find(sourceNoneName); - for (auto& sh : sourceHosts->second) { - auto& parts = relatedParts_[sh]; - auto maxIt = std::find(parts.begin(), parts.end(), partId); - if (maxIt == parts.end()) { - LOG(INFO) << "Part " << partId << " not found on " << sh; - continue; - } - parts.erase(maxIt); - } - - auto& targetNoneName = zoneParts_[targetHost].first; - auto targetHosts = zoneHosts_.find(targetNoneName); - for (auto& th : targetHosts->second) { - relatedParts_[th].emplace_back(partId); - } - } - - partsFrom.erase(it); - partsTo.emplace_back(partId); - tasks.emplace_back( - balanceId, spaceId, partId, maxPartsHost.first, minPartsHost.first, kv_, client_); - noAction = false; - } - - if (noAction) { - LOG(INFO) << "Here is no action"; - break; - } - sortedHosts = sortedHostsByParts(confirmedHostParts); - maxPartsHost = 
sortedHosts.back(); - minPartsHost = sortedHosts.front(); - if (innerBalance_) { - while (!checkZoneLegal(maxPartsHost.first, minPartsHost.first)) { - sortedHosts.pop_back(); - maxPartsHost = sortedHosts.back(); - } - - auto& source = maxPartsHost.first; - auto iter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - auto& zoneName = iter->second.first; - int32_t hostsSize = zoneHosts_[zoneName].size(); - int32_t totalPartsZone = 0; - for (auto& host : zoneHosts_[zoneName]) { - auto it = confirmedHostParts.find(host); - if (it == confirmedHostParts.end()) { - LOG(ERROR) << "Host " << host << "not in confirmedHostParts"; - continue; - } - totalPartsZone += it->second.size(); - } - - avgLoad = static_cast(totalPartsZone) / hostsSize; - minLoad = std::floor(avgLoad); - maxLoad = std::ceil(avgLoad); - LOG(INFO) << "Update min and max loading Total parts in zone " << totalPartsZone - << ", total hosts " << hostsSize << " The expect avg load is " << avgLoad - << " The min load is " << minLoad << " max load is " << maxLoad; - } - } - LOG(INFO) << "Balance tasks num: " << tasks.size(); - for (auto& task : tasks) { - LOG(INFO) << task.taskIdStr(); - } - - relatedParts_.clear(); - return true; -} - -ErrorOr Balancer::getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << " " - << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - while (iter->valid()) { - auto key = iter->key(); - PartitionID partId; - memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); - auto partHosts = 
MetaKeyUtils::parsePartVal(iter->val()); - for (auto& ph : partHosts) { - hostParts[ph].emplace_back(partId); - } - totalParts++; - iter->next(); - } - - LOG(INFO) << "Host size: " << hostParts.size(); - auto key = MetaKeyUtils::spaceKey(spaceId); - std::string value; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, key, &value); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId - << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto properties = MetaKeyUtils::parseSpace(value); - if (totalParts != properties.get_partition_num()) { - LOG(ERROR) << "Partition number not equals"; - LOG(ERROR) << totalParts << " : " << properties.get_partition_num(); - return false; - } - - int32_t replica = properties.get_replica_factor(); - LOG(INFO) << "Replica " << replica; - if (dependentOnGroup && properties.group_name_ref().has_value()) { - auto groupName = *properties.group_name_ref(); - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - int32_t zoneSize = MetaKeyUtils::parseZoneNames(std::move(groupValue)).size(); - LOG(INFO) << "Zone Size " << zoneSize; - innerBalance_ = (replica == zoneSize); - - auto activeHostsRet = ActiveHostsMan::getActiveHostsWithGroup(kv_, spaceId); - if (!nebula::ok(activeHostsRet)) { - return nebula::error(activeHostsRet); - } - - std::vector expand; - auto activeHosts = nebula::value(activeHostsRet); - std::vector lostHosts; - calDiff(hostParts, activeHosts, expand, lostHosts); - // confirmedHostParts is new part allocation map after balance, it would include newlyAdded - // and exclude lostHosts - HostParts confirmedHostParts(hostParts); - for (const auto& h : 
expand) { - LOG(INFO) << "Found new host " << h; - confirmedHostParts.emplace(h, std::vector()); - } - for (const auto& h : lostHosts) { - LOG(INFO) << "Lost host " << h; - confirmedHostParts.erase(h); - } - - auto zonePartsRet = assembleZoneParts(groupName, confirmedHostParts); - if (zonePartsRet != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Assemble Zone Parts failed group: " << groupName; - return zonePartsRet; - } - } - - totalParts *= replica; - return true; -} - -nebula::cpp2::ErrorCode Balancer::assembleZoneParts(const std::string& groupName, - HostParts& hostParts) { - LOG(INFO) << "Balancer assembleZoneParts"; - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - // zoneHosts use to record this host belong to zone's hosts - std::unordered_map, std::vector> zoneHosts; - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); - for (auto zoneName : zoneNames) { - LOG(INFO) << "Zone Name: " << zoneName; - auto zoneKey = MetaKeyUtils::zoneKey(zoneName); - std::string zoneValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName - << " failed: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); - for (const auto& host : hosts) { - LOG(INFO) << "Host for zone " << host; - auto pair = std::pair(std::move(host), zoneName); - auto& hs = zoneHosts[std::move(pair)]; - hs.insert(hs.end(), hosts.begin(), hosts.end()); - } - } - - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - auto host = it->first; - LOG(INFO) << "Host: " << 
host; - auto zoneIter = - std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { - return host == pair.first.first; - }); - - if (zoneIter == zoneHosts.end()) { - LOG(INFO) << it->first << " have lost"; - continue; - } - - auto& hosts = zoneIter->second; - auto name = zoneIter->first.second; - zoneHosts_[name] = hosts; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto partIter = hostParts.find(*hostIter); - LOG(INFO) << "Zone " << name << " have the host " << it->first; - if (partIter == hostParts.end()) { - zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); - } else { - zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); - } - } - } - - for (auto it = zoneHosts.begin(); it != zoneHosts.end(); it++) { - auto host = it->first.first; - auto& hosts = it->second; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto h = *hostIter; - auto iter = std::find_if(hostParts.begin(), hostParts.end(), [h](const auto& pair) -> bool { - return h == pair.first; - }); - - if (iter == hostParts.end()) { - continue; - } - - auto& parts = iter->second; - auto& hp = relatedParts_[host]; - hp.insert(hp.end(), parts.begin(), parts.end()); - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -void Balancer::calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& expand, - std::vector& lost) { - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - VLOG(1) << "Original Host " << it->first << ", parts " << it->second.size(); - if (std::find(activeHosts.begin(), activeHosts.end(), it->first) == activeHosts.end() && - std::find(lost.begin(), lost.end(), it->first) == lost.end()) { - lost.emplace_back(it->first); - } - } - for (auto& h : activeHosts) { - VLOG(1) << "Active host " << h; - if (hostParts.find(h) == hostParts.end()) { - expand.emplace_back(h); - } - } -} - -std::vector> Balancer::sortedHostsByParts(const HostParts& 
hostParts) { - std::vector> hosts; - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - LOG(INFO) << "Host " << it->first << " parts " << it->second.size(); - hosts.emplace_back(it->first, it->second.size()); - } - std::sort(hosts.begin(), hosts.end(), [](const auto& l, const auto& r) { - if (l.second != r.second) { - return l.second < r.second; - } else { - return l.first.host < r.first.host; - } - }); - return hosts; -} - -Status Balancer::checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId) { - // check host hold the part and alive - auto checkPart = [&](const auto& entry) { - auto& host = entry.first; - auto& parts = entry.second; - return std::find(parts.begin(), parts.end(), partId) != parts.end() && - std::find(activeHosts.begin(), activeHosts.end(), host) != activeHosts.end(); - }; - auto aliveReplica = std::count_if(hostParts.begin(), hostParts.end(), checkPart); - if (aliveReplica >= replica / 2 + 1) { - return Status::OK(); - } - return Status::Error("Not enough alive host hold the part %d", partId); -} - -ErrorOr Balancer::hostWithMinimalParts( - const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; - } - - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; - } - } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; -} - -ErrorOr Balancer::hostWithMinimalPartsForZone( - const HostAddr& source, const HostParts& hostParts, PartitionID partId) { - auto hosts = sortedHostsByParts(hostParts); - for (auto& h : hosts) { - auto it = hostParts.find(h.first); - if (it == hostParts.end()) { - LOG(ERROR) << "Host " << h.first << " not found"; - return nebula::cpp2::ErrorCode::E_NO_HOSTS; - } - - LOG(INFO) << 
"source " << source << " h.first " << h.first; - if (std::find(it->second.begin(), it->second.end(), partId) == it->second.end()) { - return h.first; - } - } - return nebula::cpp2::ErrorCode::E_NO_HOSTS; -} - -nebula::cpp2::ErrorCode Balancer::leaderBalance() { - if (running_) { - LOG(INFO) << "Balance process still running"; - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; - } - - folly::Promise promise; - auto future = promise.getFuture(); - // Space ID, Replica Factor and Dependent On Group - std::vector> spaces; - auto ret = getAllSpaces(spaces); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Can't get spaces"; - // TODO unify error code - if (ret != nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { - ret = nebula::cpp2::ErrorCode::E_STORE_FAILURE; - } - return ret; - } - - bool expected = false; - if (inLeaderBalance_.compare_exchange_strong(expected, true)) { - hostLeaderMap_.reset(new HostLeaderMap); - auto status = client_->getLeaderDist(hostLeaderMap_.get()).get(); - if (!status.ok() || hostLeaderMap_->empty()) { - LOG(ERROR) << "Get leader distribution failed"; - inLeaderBalance_ = false; - return nebula::cpp2::ErrorCode::E_RPC_FAILURE; - } - - std::vector> futures; - for (const auto& spaceInfo : spaces) { - auto spaceId = std::get<0>(spaceInfo); - auto replicaFactor = std::get<1>(spaceInfo); - auto dependentOnGroup = std::get<2>(spaceInfo); - LeaderBalancePlan plan; - auto balanceResult = buildLeaderBalancePlan( - hostLeaderMap_.get(), spaceId, replicaFactor, dependentOnGroup, plan); - if (!nebula::ok(balanceResult) || !nebula::value(balanceResult)) { - LOG(ERROR) << "Building leader balance plan failed " - << "Space: " << spaceId; - continue; - } - simplifyLeaderBalancePlan(spaceId, plan); - for (const auto& task : plan) { - futures.emplace_back(client_->transLeader(std::get<0>(task), - std::get<1>(task), - std::move(std::get<2>(task)), - std::move(std::get<3>(task)))); - } - } - - int32_t failed = 0; - folly::collectAll(futures) - 
.via(executor_.get()) - .thenTry([&](const auto& result) { - auto tries = result.value(); - for (const auto& t : tries) { - if (!t.value().ok()) { - ++failed; - } - } - }) - .wait(); - - inLeaderBalance_ = false; - if (failed != 0) { - LOG(ERROR) << failed << " partition failed to transfer leader"; - } - return nebula::cpp2::ErrorCode::SUCCEEDED; - } - return nebula::cpp2::ErrorCode::E_BALANCER_RUNNING; -} - -ErrorOr Balancer::buildLeaderBalancePlan( - HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation) { - PartAllocation peersMap; - HostParts leaderHostParts; - size_t leaderParts = 0; - // store peers of all partitions in peerMap - folly::SharedMutex::ReadHolder rHolder(LockUtils::spaceLock()); - const auto& prefix = MetaKeyUtils::partPrefix(spaceId); - std::unique_ptr iter; - auto retCode = kv_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Access kvstore failed, spaceId " << spaceId << static_cast(retCode); - return retCode; - } - - while (iter->valid()) { - auto key = iter->key(); - PartitionID partId; - memcpy(&partId, key.data() + prefix.size(), sizeof(PartitionID)); - auto peers = MetaKeyUtils::parsePartVal(iter->val()); - peersMap[partId] = std::move(peers); - ++leaderParts; - iter->next(); - } - - int32_t totalParts = 0; - HostParts allHostParts; - auto result = getHostParts(spaceId, dependentOnGroup, allHostParts, totalParts); - if (!nebula::ok(result)) { - return nebula::error(result); - } else { - auto retVal = nebula::value(result); - if (!retVal || totalParts == 0 || allHostParts.empty()) { - LOG(ERROR) << "Invalid space " << spaceId; - return false; - } - } - - std::unordered_set activeHosts; - for (const auto& host : *hostLeaderMap) { - // only balance leader between hosts which have valid partition - if (!allHostParts[host.first].empty()) { - 
activeHosts.emplace(host.first); - leaderHostParts[host.first] = (*hostLeaderMap)[host.first][spaceId]; - } - } - - if (activeHosts.empty()) { - LOG(ERROR) << "No active hosts"; - return false; - } - - if (dependentOnGroup) { - for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { - auto min = it->second.size() / replicaFactor; - VLOG(3) << "Host: " << it->first << " Bounds: " << min << " : " << min + 1; - hostBounds_[it->first] = std::make_pair(min, min + 1); - } - } else { - size_t activeSize = activeHosts.size(); - size_t globalAvg = leaderParts / activeSize; - size_t globalMin = globalAvg; - size_t globalMax = globalAvg; - if (leaderParts % activeSize != 0) { - globalMax += 1; - } - - if (useDeviation) { - globalMin = std::ceil(static_cast(leaderParts) / activeSize * - (1 - FLAGS_leader_balance_deviation)); - globalMax = std::floor(static_cast(leaderParts) / activeSize * - (1 + FLAGS_leader_balance_deviation)); - } - VLOG(3) << "Build leader balance plan, expected min load: " << globalMin - << ", max load: " << globalMax << " avg: " << globalAvg; - - for (auto it = allHostParts.begin(); it != allHostParts.end(); it++) { - hostBounds_[it->first] = std::make_pair(globalMin, globalMax); - } - } - - while (true) { - int32_t taskCount = 0; - bool hasUnbalancedHost = false; - for (const auto& hostEntry : leaderHostParts) { - auto host = hostEntry.first; - auto& hostMinLoad = hostBounds_[host].first; - auto& hostMaxLoad = hostBounds_[host].second; - int32_t partSize = hostEntry.second.size(); - if (hostMinLoad <= partSize && partSize <= hostMaxLoad) { - VLOG(3) << partSize << " is between min load " << hostMinLoad << " and max load " - << hostMaxLoad; - continue; - } - - hasUnbalancedHost = true; - if (partSize < hostMinLoad) { - // need to acquire leader from other hosts - LOG(INFO) << "Acquire leaders to host: " << host << " loading: " << partSize - << " min loading " << hostMinLoad; - taskCount += acquireLeaders( - allHostParts, leaderHostParts, 
peersMap, activeHosts, host, plan, spaceId); - } else { - // need to transfer leader to other hosts - LOG(INFO) << "Giveup leaders from host: " << host << " loading: " << partSize - << " max loading " << hostMaxLoad; - taskCount += giveupLeaders(leaderHostParts, peersMap, activeHosts, host, plan, spaceId); - } - } - - // If every host is balanced or no more task during this loop, then the plan - // is done - if (!hasUnbalancedHost || taskCount == 0) { - LOG(INFO) << "Not need balance"; - break; - } - } - return true; -} - -int32_t Balancer::acquireLeaders(HostParts& allHostParts, - HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& target, - LeaderBalancePlan& plan, - GraphSpaceID spaceId) { - // host will loop for the partition which is not leader, and try to acquire the - // leader - int32_t taskCount = 0; - std::vector diff; - std::set_difference(allHostParts[target].begin(), - allHostParts[target].end(), - leaderHostParts[target].begin(), - leaderHostParts[target].end(), - std::back_inserter(diff)); - auto& targetLeaders = leaderHostParts[target]; - size_t minLoad = hostBounds_[target].first; - for (const auto& partId : diff) { - VLOG(3) << "Try acquire leader for part " << partId; - // find the leader of partId - auto sources = peersMap[partId]; - for (const auto& source : sources) { - if (source == target || !activeHosts.count(source)) { - continue; - } - - // if peer is the leader of partId and can transfer, then transfer it to - // host - auto& sourceLeaders = leaderHostParts[source]; - VLOG(3) << "Check peer: " << source << " min load: " << minLoad - << " peerLeaders size: " << sourceLeaders.size(); - auto it = std::find(sourceLeaders.begin(), sourceLeaders.end(), partId); - if (it != sourceLeaders.end() && minLoad < sourceLeaders.size()) { - sourceLeaders.erase(it); - targetLeaders.emplace_back(partId); - plan.emplace_back(spaceId, partId, source, target); - LOG(INFO) << "acquire plan trans leader 
space: " << spaceId << " part: " << partId - << " from " << source.host << ":" << source.port << " to " << target.host << ":" - << target.port; - ++taskCount; - break; - } - } - - // if host has enough leader, just return - if (targetLeaders.size() == minLoad) { - LOG(INFO) << "Host: " << target << "'s leader reach " << minLoad; - break; - } - } - return taskCount; -} - -int32_t Balancer::giveupLeaders(HostParts& leaderParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& source, - LeaderBalancePlan& plan, - GraphSpaceID spaceId) { - int32_t taskCount = 0; - auto& sourceLeaders = leaderParts[source]; - size_t maxLoad = hostBounds_[source].second; - - // host will try to transfer the extra leaders to other peers - for (auto it = sourceLeaders.begin(); it != sourceLeaders.end();) { - // find the leader of partId - auto partId = *it; - const auto& targets = peersMap[partId]; - bool isErase = false; - - // leader should move to the peer with lowest loading - auto target = - std::min_element(targets.begin(), targets.end(), [&](const auto& l, const auto& r) -> bool { - if (source == l || !activeHosts.count(l)) { - return false; - } - return leaderParts[l].size() < leaderParts[r].size(); - }); - - // If peer can accept this partition leader, than host will transfer to the - // peer - if (target != targets.end()) { - auto& targetLeaders = leaderParts[*target]; - int32_t targetLeaderSize = targetLeaders.size(); - if (targetLeaderSize < hostBounds_[*target].second) { - it = sourceLeaders.erase(it); - targetLeaders.emplace_back(partId); - plan.emplace_back(spaceId, partId, source, *target); - LOG(INFO) << "giveup plan trans leader space: " << spaceId << " part: " << partId - << " from " << source.host << ":" << source.port << " to " << target->host << ":" - << target->port; - ++taskCount; - isErase = true; - } - } - - // if host has enough leader, just return - if (sourceLeaders.size() == maxLoad) { - LOG(INFO) << "Host: " << source << "'s 
leader reach " << maxLoad; - break; - } - - if (!isErase) { - ++it; - } - } - return taskCount; -} - -void Balancer::simplifyLeaderBalancePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan) { - // Within a leader balance plan, a partition may be moved several times, but - // actually we only need to transfer the leadership of a partition from the - // first host to the last host, and ignore the intermediate ones - std::unordered_map buckets; - for (auto& task : plan) { - buckets[std::get<1>(task)].emplace_back(task); - } - plan.clear(); - for (const auto& partEntry : buckets) { - plan.emplace_back(spaceId, - partEntry.first, - std::get<2>(partEntry.second.front()), - std::get<3>(partEntry.second.back())); - } -} - -nebula::cpp2::ErrorCode Balancer::collectZoneParts(const std::string& groupName, - HostParts& hostParts) { - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - auto retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - // zoneHosts use to record this host belong to zone's hosts - std::unordered_map, std::vector> zoneHosts; - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); - for (auto zoneName : zoneNames) { - auto zoneKey = MetaKeyUtils::zoneKey(zoneName); - std::string zoneValue; - retCode = kv_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); - for (const auto& host : hosts) { - auto pair = std::pair(std::move(host), zoneName); - auto& hs = zoneHosts[std::move(pair)]; - hs.insert(hs.end(), hosts.begin(), hosts.end()); - } - } 
- - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - auto host = it->first; - auto zoneIter = - std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { - return host == pair.first.first; - }); - - if (zoneIter == zoneHosts.end()) { - LOG(INFO) << it->first << " have lost"; - continue; - } - - auto& hosts = zoneIter->second; - auto name = zoneIter->first.second; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto partIter = hostParts.find(*hostIter); - if (partIter == hostParts.end()) { - zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); - } else { - zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); - } - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -bool Balancer::checkZoneLegal(const HostAddr& source, const HostAddr& target) { - VLOG(3) << "Check " << source << " : " << target; - auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { - return source == pair.first; - }); - - if (sourceIter == zoneParts_.end()) { - LOG(INFO) << "Source " << source << " not found"; - return false; - } - - auto targetIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&target](const auto& pair) { - return target == pair.first; - }); - - if (targetIter == zoneParts_.end()) { - LOG(INFO) << "Target " << target << " not found"; - return false; - } - - LOG(INFO) << sourceIter->second.first << " : " << targetIter->second.first; - return sourceIter->second.first == targetIter->second.first; -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/admin/Balancer.h b/src/meta/processors/admin/Balancer.h deleted file mode 100644 index 4a5331ee2a4..00000000000 --- a/src/meta/processors/admin/Balancer.h +++ /dev/null @@ -1,269 +0,0 @@ -/* Copyright (c) 2019 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_ADMIN_BALANCER_H_ -#define META_ADMIN_BALANCER_H_ - -#include -#include - -#include "common/network/NetworkUtils.h" -#include "common/time/WallClock.h" -#include "kvstore/KVStore.h" -#include "meta/processors/admin/AdminClient.h" -#include "meta/processors/admin/BalancePlan.h" -#include "meta/processors/admin/BalanceTask.h" - -namespace nebula { -namespace meta { - -using HostParts = std::unordered_map>; -using PartAllocation = std::unordered_map>; -using LeaderBalancePlan = std::vector>; -using ZoneNameAndParts = std::pair>; - -/** -There are two interfaces public: - * Balance: it will construct a balance plan and invoked it. If last balance -plan is not succeeded, it will - * try to resume it. - * - * Rollback: In many cases, if some plan failed forever, we call this interface -to rollback. - -Some notes: -1. Balance will generate balance plan according to current active hosts and -parts allocation -2. For the plan, we hope after moving the least parts , it will reach a -reasonable state. -3. Only one balance plan could be invoked at the same time. -4. Each balance plan has one id, and we could show the status by "balance id" -command and after FO, we could resume the balance plan by type "balance" again. -5. Each balance plan contains many balance tasks, the task represents the -minimum movement unit. -6. We save the whole balancePlan state in kvstore to do failover. -7. Each balance task contains serval steps. And it should be executed step by -step. -8. One task failed will result in the whole balance plan failed. -9. 
Currently, we hope tasks for the same part could be invoked serially - * */ -class Balancer { - FRIEND_TEST(BalanceTest, BalancePartsTest); - FRIEND_TEST(BalanceTest, NormalTest); - FRIEND_TEST(BalanceTest, SimpleTestWithZone); - FRIEND_TEST(BalanceTest, SpecifyHostTest); - FRIEND_TEST(BalanceTest, SpecifyMultiHostTest); - FRIEND_TEST(BalanceTest, MockReplaceMachineTest); - FRIEND_TEST(BalanceTest, SingleReplicaTest); - FRIEND_TEST(BalanceTest, TryToRecoveryTest); - FRIEND_TEST(BalanceTest, RecoveryTest); - FRIEND_TEST(BalanceTest, StopPlanTest); - FRIEND_TEST(BalanceTest, CleanLastInvalidBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, SimpleLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, IntersectHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceTest); - FRIEND_TEST(BalanceTest, ManyHostsLeaderBalancePlanTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithLargerZoneTest); - FRIEND_TEST(BalanceTest, LeaderBalanceWithComplexZoneTest); - FRIEND_TEST(BalanceTest, ExpansionZoneTest); - FRIEND_TEST(BalanceTest, ExpansionHostIntoZoneTest); - FRIEND_TEST(BalanceTest, ShrinkZoneTest); - FRIEND_TEST(BalanceTest, ShrinkHostFromZoneTest); - FRIEND_TEST(BalanceTest, BalanceWithComplexZoneTest); - FRIEND_TEST(BalanceIntegrationTest, LeaderBalanceTest); - FRIEND_TEST(BalanceIntegrationTest, BalanceTest); - - public: - static Balancer* instance(kvstore::KVStore* kv) { - static std::unique_ptr client(new AdminClient(kv)); - static std::unique_ptr balancer(new Balancer(kv, client.get())); - return balancer.get(); - } - - ~Balancer() = default; - - /* - * Return Error if reject the balance request, otherwise return balance id. - * */ - ErrorOr balance(std::vector&& lostHosts = {}); - - /** - * Show balance plan id status. - * */ - ErrorOr show(BalanceID id) const; - - /** - * Stop balance plan by canceling all waiting balance task. 
- * */ - ErrorOr stop(); - - /** - * Clean invalid plan, return the invalid plan key if any - * */ - ErrorOr cleanLastInValidPlan(); - - /** - * TODO(heng): rollback some balance plan. - */ - Status rollback(BalanceID id) { return Status::Error("unimplemented, %ld", id); } - - /** - * TODO(heng): Execute balance plan from outside. - * */ - Status execute(BalancePlan plan) { - UNUSED(plan); - return Status::Error("Unsupport it yet!"); - } - - /** - * TODO(heng): Execute specific balance plan by id. - * */ - Status execute(BalanceID id) { - UNUSED(id); - return Status::Error("Unsupport it yet!"); - } - - nebula::cpp2::ErrorCode leaderBalance(); - - void finish() { - CHECK(!lock_.try_lock()); - plan_.reset(); - running_ = false; - } - - bool isRunning() { - std::lock_guard lg(lock_); - return running_; - } - - private: - Balancer(kvstore::KVStore* kv, AdminClient* client) : kv_(kv), client_(client) { - executor_.reset(new folly::CPUThreadPoolExecutor(1)); - } - /* - * When the balancer failover, we should recovery the status. - * */ - nebula::cpp2::ErrorCode recovery(); - - /** - * Build balance plan and save it in kvstore. 
- * */ - nebula::cpp2::ErrorCode buildBalancePlan(std::vector&& lostHosts); - - ErrorOr> genTasks( - GraphSpaceID spaceId, - int32_t spaceReplica, - bool dependentOnGroup, - std::vector&& lostHosts); - - ErrorOr>> fetchHostParts( - GraphSpaceID spaceId, - bool dependentOnGroup, - const HostParts& hostParts, - std::vector& lostHosts); - - ErrorOr getHostParts(GraphSpaceID spaceId, - bool dependentOnGroup, - HostParts& hostParts, - int32_t& totalParts); - - nebula::cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); - - void calDiff(const HostParts& hostParts, - const std::vector& activeHosts, - std::vector& newlyAdded, - std::vector& lost); - - Status checkReplica(const HostParts& hostParts, - const std::vector& activeHosts, - int32_t replica, - PartitionID partId); - - ErrorOr hostWithMinimalParts(const HostParts& hostParts, - PartitionID partId); - - ErrorOr hostWithMinimalPartsForZone(const HostAddr& source, - const HostParts& hostParts, - PartitionID partId); - - bool balanceParts(BalanceID balanceId, - GraphSpaceID spaceId, - HostParts& newHostParts, - int32_t totalParts, - std::vector& tasks, - bool dependentOnGroup); - - nebula::cpp2::ErrorCode transferLostHost(std::vector& tasks, - HostParts& newHostParts, - const HostAddr& source, - GraphSpaceID spaceId, - PartitionID partId, - bool dependentOnGroup); - - std::vector> sortedHostsByParts(const HostParts& hostParts); - - nebula::cpp2::ErrorCode getAllSpaces( - std::vector>& spaces); - - ErrorOr buildLeaderBalancePlan(HostLeaderMap* hostLeaderMap, - GraphSpaceID spaceId, - int32_t replicaFactor, - bool dependentOnGroup, - LeaderBalancePlan& plan, - bool useDeviation = true); - - void simplifyLeaderBalancePlan(GraphSpaceID spaceId, LeaderBalancePlan& plan); - - int32_t acquireLeaders(HostParts& allHostParts, - HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& target, - LeaderBalancePlan& plan, - GraphSpaceID spaceId); 
- - int32_t giveupLeaders(HostParts& leaderHostParts, - PartAllocation& peersMap, - std::unordered_set& activeHosts, - const HostAddr& source, - LeaderBalancePlan& plan, - GraphSpaceID spaceId); - - nebula::cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); - - bool checkZoneLegal(const HostAddr& source, const HostAddr& target); - - private: - std::atomic_bool running_{false}; - kvstore::KVStore* kv_{nullptr}; - AdminClient* client_{nullptr}; - // Current running plan. - std::shared_ptr plan_{nullptr}; - std::unique_ptr executor_; - std::atomic_bool inLeaderBalance_{false}; - - // Host => Graph => Partitions - std::unique_ptr hostLeaderMap_; - mutable std::mutex lock_; - - std::unordered_map> hostBounds_; - - // TODO: (darion) nesting map maybe better - std::unordered_map zoneParts_; - std::unordered_map> zoneHosts_; - - // if the space dependent on group, it use to record the partition - // contained in the zone related to the node. - std::unordered_map> relatedParts_; - - bool innerBalance_ = false; -}; - -} // namespace meta -} // namespace nebula - -#endif // META_ADMIN_BALANCER_H_ From 4886d4fb33317639fc676fe9c533aa2ff3450ca2 Mon Sep 17 00:00:00 2001 From: CBS Date: Tue, 23 Nov 2021 15:04:19 +0800 Subject: [PATCH 36/53] Support https for es client (#3150) * Support https for es client * addressed comments Co-authored-by: Doodle <13706157+critical27@users.noreply.github.com> --- src/common/plugin/fulltext/FTUtils.h | 20 ++++++-- .../fulltext/test/FulltextPluginTest.cpp | 12 ++--- .../executor/admin/ShowTSClientsExecutor.cpp | 7 ++- src/graph/util/FTIndexUtils.cpp | 1 + src/interface/meta.thrift | 1 + .../plugins/elasticsearch/ESListener.cpp | 1 + src/parser/AdminSentences.cpp | 11 ++++- src/parser/parser.yy | 36 ++++++++++++++- src/parser/scanner.lex | 2 + src/parser/test/ParserTest.cpp | 46 +++++++++++++++++++ 10 files changed, 123 insertions(+), 14 deletions(-) diff --git a/src/common/plugin/fulltext/FTUtils.h 
b/src/common/plugin/fulltext/FTUtils.h index fea6d89f261..3e2382641c8 100644 --- a/src/common/plugin/fulltext/FTUtils.h +++ b/src/common/plugin/fulltext/FTUtils.h @@ -38,15 +38,19 @@ struct HttpClient { HostAddr host; std::string user; std::string password; + std::string connType{"http"}; HttpClient() = default; ~HttpClient() = default; explicit HttpClient(HttpClient&& v) noexcept - : host(std::move(v.host)), user(std::move(v.user)), password(std::move(v.password)) {} + : host(std::move(v.host)), + user(std::move(v.user)), + password(std::move(v.password)), + connType(std::move(v.connType)) {} explicit HttpClient(const HttpClient& v) noexcept - : host(v.host), user(v.user), password(v.password) {} + : host(v.host), user(v.user), password(v.password), connType(v.connType) {} explicit HttpClient(HostAddr&& h) noexcept : host(std::move(h)) {} @@ -58,10 +62,20 @@ struct HttpClient { HttpClient(const HostAddr& h, const std::string& u, const std::string& p) noexcept : host(h), user(u), password(p) {} + HttpClient(HostAddr&& h, std::string&& u, std::string&& p, std::string&& c) noexcept + : host(std::move(h)), user(std::move(u)), password(std::move(p)), connType(std::move(c)) {} + + HttpClient(const HostAddr& h, + const std::string& u, + const std::string& p, + const std::string& c) noexcept + : host(h), user(u), password(p), connType(std::move(c)) {} + void clear() { host.clear(); user.clear(); password.clear(); + connType.clear(); } std::string toString() const { @@ -72,7 +86,7 @@ struct HttpClient { os << ":" << password; } } - os << " \"http://" << host.host << ":" << host.port << "/"; + os << " -k \"" << connType << "://" << host.host << ":" << host.port << "/"; return os.str(); } }; diff --git a/src/common/plugin/fulltext/test/FulltextPluginTest.cpp b/src/common/plugin/fulltext/test/FulltextPluginTest.cpp index c79550da633..d62a372924e 100644 --- a/src/common/plugin/fulltext/test/FulltextPluginTest.cpp +++ b/src/common/plugin/fulltext/test/FulltextPluginTest.cpp @@ 
-41,7 +41,7 @@ TEST(FulltextPluginTest, ESIndexCheckTest) { auto ret = ESGraphAdapter().indexExistsCmd(client, "test_index"); auto expected = "/usr/bin/curl -H \"Content-Type: application/json; charset=utf-8\" " - "-XGET \"http://127.0.0.1:9200/_cat/indices/test_index?format=json\""; + "-XGET -k \"http://127.0.0.1:9200/_cat/indices/test_index?format=json\""; ASSERT_EQ(expected, ret); } @@ -51,7 +51,7 @@ TEST(FulltextPluginTest, ESCreateIndexTest) { auto ret = ESGraphAdapter().createIndexCmd(client, "test_index"); auto expected = "/usr/bin/curl -H \"Content-Type: application/json; charset=utf-8\" " - "-XPUT \"http://127.0.0.1:9200/test_index\""; + "-XPUT -k \"http://127.0.0.1:9200/test_index\""; ASSERT_EQ(expected, ret); } @@ -61,7 +61,7 @@ TEST(FulltextPluginTest, ESDropIndexTest) { auto ret = ESGraphAdapter().dropIndexCmd(client, "test_index"); auto expected = "/usr/bin/curl -H \"Content-Type: application/json; charset=utf-8\" " - "-XDELETE \"http://127.0.0.1:9200/test_index\""; + "-XDELETE -k \"http://127.0.0.1:9200/test_index\""; ASSERT_EQ(expected, ret); } @@ -72,7 +72,7 @@ TEST(FulltextPluginTest, ESPutTest) { auto header = ESStorageAdapter().putHeader(hc, item); std::string expected = "/usr/bin/curl -H \"Content-Type: application/json; charset=utf-8\" " - "-XPUT \"http://127.0.0.1:9200/index1/_doc/" + "-XPUT -k \"http://127.0.0.1:9200/index1/_doc/" "00000000018c43de7b01bca674276c43e09b3ec5baYWFhYQ==\""; ASSERT_EQ(expected, header); @@ -97,7 +97,7 @@ TEST(FulltextPluginTest, ESBulkTest) { auto header = ESStorageAdapter().bulkHeader(hc); std::string expected = "/usr/bin/curl -H \"Content-Type: application/x-ndjson; " - "charset=utf-8\" -XPOST \"http://127.0.0.1:9200/_bulk\""; + "charset=utf-8\" -XPOST -k \"http://127.0.0.1:9200/_bulk\""; ASSERT_EQ(expected, header); std::vector bodies; @@ -251,7 +251,7 @@ TEST(FulltextPluginTest, ESPrefixTest) { auto header = ESGraphAdapter().header(client, item, limit); std::string expected = "/usr/bin/curl -H \"Content-Type: 
application/json; charset=utf-8\" " - "-XGET \"http://127.0.0.1:9200/index1/_search?timeout=10ms\""; + "-XGET -k \"http://127.0.0.1:9200/index1/_search?timeout=10ms\""; ASSERT_EQ(expected, header); auto body = ESGraphAdapter().prefixBody("aa"); diff --git a/src/graph/executor/admin/ShowTSClientsExecutor.cpp b/src/graph/executor/admin/ShowTSClientsExecutor.cpp index b23292abbc5..f5981a6c1f3 100644 --- a/src/graph/executor/admin/ShowTSClientsExecutor.cpp +++ b/src/graph/executor/admin/ShowTSClientsExecutor.cpp @@ -25,9 +25,12 @@ folly::Future ShowTSClientsExecutor::showTSClients() { return resp.status(); } auto value = std::move(resp).value(); - DataSet v({"Host", "Port"}); + DataSet v({"Host", "Port", "Connection type"}); for (const auto &client : value) { - nebula::Row r({client.host.host, client.host.port}); + nebula::Row r; + r.values.emplace_back(client.host.host); + r.values.emplace_back(client.host.port); + r.values.emplace_back(client.conn_type_ref().has_value() ? *client.get_conn_type() : "http"); v.emplace_back(std::move(r)); } return finish(std::move(v)); diff --git a/src/graph/util/FTIndexUtils.cpp b/src/graph/util/FTIndexUtils.cpp index 683ad437291..289d0d20606 100644 --- a/src/graph/util/FTIndexUtils.cpp +++ b/src/graph/util/FTIndexUtils.cpp @@ -42,6 +42,7 @@ StatusOr> FTIndexUtils::getTSClients( hc.user = *c.user_ref(); hc.password = *c.pwd_ref(); } + hc.connType = c.conn_type_ref().has_value() ? 
*c.get_conn_type() : "http"; tsClients.emplace_back(std::move(hc)); } return tsClients; diff --git a/src/interface/meta.thrift b/src/interface/meta.thrift index f75e5751bf1..166d738623c 100644 --- a/src/interface/meta.thrift +++ b/src/interface/meta.thrift @@ -991,6 +991,7 @@ struct FTClient { 1: required common.HostAddr host, 2: optional binary user, 3: optional binary pwd, + 4: optional binary conn_type, } struct SignInFTServiceReq { diff --git a/src/kvstore/plugins/elasticsearch/ESListener.cpp b/src/kvstore/plugins/elasticsearch/ESListener.cpp index 767298d7dbf..2d899d61a6c 100644 --- a/src/kvstore/plugins/elasticsearch/ESListener.cpp +++ b/src/kvstore/plugins/elasticsearch/ESListener.cpp @@ -31,6 +31,7 @@ void ESListener::init() { hc.user = *c.user_ref(); hc.password = *c.pwd_ref(); } + hc.connType = c.conn_type_ref().has_value() ? *c.get_conn_type() : "http"; esClients_.emplace_back(std::move(hc)); } diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index e85079af095..54f4f29dea2 100644 --- a/src/parser/AdminSentences.cpp +++ b/src/parser/AdminSentences.cpp @@ -299,6 +299,13 @@ std::string SignInTextServiceSentence::toString() const { buf += client.get_host().host; buf += ":"; buf += std::to_string(client.get_host().port); + if (client.conn_type_ref().has_value()) { + std::string connType = *client.get_conn_type(); + auto toupper = [](auto c) { return ::toupper(c); }; + std::transform(connType.begin(), connType.end(), connType.begin(), toupper); + buf += ", "; + buf += connType; + } if (client.user_ref().has_value() && !(*client.user_ref()).empty()) { buf += ", \""; buf += *client.get_user(); @@ -310,10 +317,10 @@ std::string SignInTextServiceSentence::toString() const { buf += "\""; } buf += ")"; - buf += ","; + buf += ", "; } if (!buf.empty()) { - buf.resize(buf.size() - 1); + buf.resize(buf.size() - 2); } return buf; } diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 135fa4335b4..0d2c2695029 100644 --- 
a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -194,7 +194,7 @@ static constexpr size_t kCommentLengthLimit = 256; %token KW_UNWIND KW_SKIP KW_OPTIONAL %token KW_CASE KW_THEN KW_ELSE KW_END %token KW_GROUP KW_ZONE KW_GROUPS KW_ZONES KW_INTO -%token KW_LISTENER KW_ELASTICSEARCH KW_FULLTEXT +%token KW_LISTENER KW_ELASTICSEARCH KW_FULLTEXT KW_HTTPS KW_HTTP %token KW_AUTO KW_FUZZY KW_PREFIX KW_REGEXP KW_WILDCARD %token KW_TEXT KW_SEARCH KW_CLIENTS KW_SIGN KW_SERVICE KW_TEXT_SEARCH %token KW_ANY KW_SINGLE KW_NONE @@ -532,6 +532,8 @@ unreserved_keyword | KW_POINT { $$ = new std::string("point"); } | KW_LINESTRING { $$ = new std::string("linestring"); } | KW_POLYGON { $$ = new std::string("polygon"); } + | KW_HTTP { $$ = new std::string("http"); } + | KW_HTTPS { $$ = new std::string("https"); } ; expression @@ -1807,6 +1809,18 @@ text_search_client_item $$->set_host(*$2); delete $2; } + | L_PAREN host_item COMMA KW_HTTP R_PAREN { + $$ = new nebula::meta::cpp2::FTClient(); + $$->set_host(*$2); + $$->set_conn_type("http"); + delete $2; + } + | L_PAREN host_item COMMA KW_HTTPS R_PAREN { + $$ = new nebula::meta::cpp2::FTClient(); + $$->set_host(*$2); + $$->set_conn_type("https"); + delete $2; + } | L_PAREN host_item COMMA STRING COMMA STRING R_PAREN { $$ = new nebula::meta::cpp2::FTClient(); $$->set_host(*$2); @@ -1816,6 +1830,26 @@ text_search_client_item delete $4; delete $6; } + | L_PAREN host_item COMMA KW_HTTP COMMA STRING COMMA STRING R_PAREN { + $$ = new nebula::meta::cpp2::FTClient(); + $$->set_host(*$2); + $$->set_user(*$6); + $$->set_pwd(*$8); + $$->set_conn_type("http"); + delete $2; + delete $6; + delete $8; + } + | L_PAREN host_item COMMA KW_HTTPS COMMA STRING COMMA STRING R_PAREN { + $$ = new nebula::meta::cpp2::FTClient(); + $$->set_host(*$2); + $$->set_user(*$6); + $$->set_pwd(*$8); + $$->set_conn_type("https"); + delete $2; + delete $6; + delete $8; + } ; text_search_client_list diff --git a/src/parser/scanner.lex b/src/parser/scanner.lex index 
ffe942cfec8..87be1d017eb 100644 --- a/src/parser/scanner.lex +++ b/src/parser/scanner.lex @@ -233,6 +233,8 @@ IP_OCTET ([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]) "INTO" { return TokenType::KW_INTO; } "LISTENER" { return TokenType::KW_LISTENER; } "ELASTICSEARCH" { return TokenType::KW_ELASTICSEARCH; } +"HTTP" { return TokenType::KW_HTTP; } +"HTTPS" { return TokenType::KW_HTTPS; } "FULLTEXT" { return TokenType::KW_FULLTEXT; } "AUTO" { return TokenType::KW_AUTO; } "FUZZY" { return TokenType::KW_FUZZY; } diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp index 2fbbc03a56f..5267339a4c2 100644 --- a/src/parser/test/ParserTest.cpp +++ b/src/parser/test/ParserTest.cpp @@ -2945,23 +2945,69 @@ TEST_F(ParserTest, FullTextServiceTest) { std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200)"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTP)"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTPS)"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTPS, \"user\")"; + auto result = parse(query); + ASSERT_FALSE(result.ok()); } { std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200), (127.0.0.1:9300)"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTPS), (127.0.0.1:9300)"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); } { std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, 
\"user\", \"password\")"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTP, \"user\", \"password\")"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTPS, \"user\", \"password\")"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } { std::string query = "SIGN IN TEXT SERVICE (127.0.0.1:9200, \"user\", \"password\"), " "(127.0.0.1:9200, \"user\", \"password\")"; auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); + } + { + std::string query = + "SIGN IN TEXT SERVICE (127.0.0.1:9200, HTTP, \"user\", \"password\"), " + "(127.0.0.1:9200, HTTPS, \"user\", \"password\")"; + auto result = parse(query); + ASSERT_TRUE(result.ok()) << result.status(); + ASSERT_EQ(result.value()->toString(), query); } { std::string query = "SIGN OUT TEXT SERVICE"; From bf0c3a1fec82732de3af14b56f709ee3c17dbeb7 Mon Sep 17 00:00:00 2001 From: yaphet <4414314+darionyaphet@users.noreply.github.com> Date: Tue, 23 Nov 2021 15:39:39 +0800 Subject: [PATCH 37/53] remove some useless code (#3341) Co-authored-by: kyle.cao --- .../processors/job/BalanceJobExecutor.cpp | 58 ------------------- src/meta/processors/job/BalanceJobExecutor.h | 2 - 2 files changed, 60 deletions(-) diff --git a/src/meta/processors/job/BalanceJobExecutor.cpp b/src/meta/processors/job/BalanceJobExecutor.cpp index 81ee4aa6231..56ebaaaba4e 100644 --- a/src/meta/processors/job/BalanceJobExecutor.cpp +++ b/src/meta/processors/job/BalanceJobExecutor.cpp @@ -755,64 +755,6 @@ ErrorOr DataBalanceJobExecutor::hostWithMinim return nebula::cpp2::ErrorCode::E_NO_HOSTS; } -nebula::cpp2::ErrorCode BalanceJobExecutor::collectZoneParts(const std::string& 
groupName, - HostParts& hostParts) { - auto groupKey = MetaKeyUtils::groupKey(groupName); - std::string groupValue; - auto retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, groupKey, &groupValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get group " << groupName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - // zoneHosts use to record this host belong to zone's hosts - std::unordered_map, std::vector> zoneHosts; - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(groupValue)); - for (auto zoneName : zoneNames) { - auto zoneKey = MetaKeyUtils::zoneKey(zoneName); - std::string zoneValue; - retCode = kvstore_->get(kDefaultSpaceId, kDefaultPartId, zoneKey, &zoneValue); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - LOG(ERROR) << "Get zone " << zoneName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - - auto hosts = MetaKeyUtils::parseZoneHosts(std::move(zoneValue)); - for (const auto& host : hosts) { - auto pair = std::pair(std::move(host), zoneName); - auto& hs = zoneHosts[std::move(pair)]; - hs.insert(hs.end(), hosts.begin(), hosts.end()); - } - } - - for (auto it = hostParts.begin(); it != hostParts.end(); it++) { - auto host = it->first; - auto zoneIter = - std::find_if(zoneHosts.begin(), zoneHosts.end(), [host](const auto& pair) -> bool { - return host == pair.first.first; - }); - - if (zoneIter == zoneHosts.end()) { - LOG(INFO) << it->first << " have lost"; - continue; - } - - auto& hosts = zoneIter->second; - auto name = zoneIter->first.second; - for (auto hostIter = hosts.begin(); hostIter != hosts.end(); hostIter++) { - auto partIter = hostParts.find(*hostIter); - if (partIter == hostParts.end()) { - zoneParts_[it->first] = ZoneNameAndParts(name, std::vector()); - } else { - zoneParts_[it->first] = ZoneNameAndParts(name, partIter->second); - } - } - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - bool 
DataBalanceJobExecutor::checkZoneLegal(const HostAddr& source, const HostAddr& target) { VLOG(3) << "Check " << source << " : " << target; auto sourceIter = std::find_if(zoneParts_.begin(), zoneParts_.end(), [&source](const auto& pair) { diff --git a/src/meta/processors/job/BalanceJobExecutor.h b/src/meta/processors/job/BalanceJobExecutor.h index 52440bf6eaf..0de93f5e16e 100644 --- a/src/meta/processors/job/BalanceJobExecutor.h +++ b/src/meta/processors/job/BalanceJobExecutor.h @@ -59,8 +59,6 @@ class BalanceJobExecutor : public MetaJobExecutor { nebula::cpp2::ErrorCode assembleZoneParts(const std::string& groupName, HostParts& hostParts); - nebula::cpp2::ErrorCode collectZoneParts(const std::string& groupName, HostParts& hostParts); - nebula::cpp2::ErrorCode save(const std::string& k, const std::string& v); protected: From ae83b8923fb97f3e1d076dd700ff65b19f981eda Mon Sep 17 00:00:00 2001 From: haowen <19355821+wenhaocs@users.noreply.github.com> Date: Tue, 23 Nov 2021 01:17:55 -0800 Subject: [PATCH 38/53] Add key value separation feature to Nebula storage engine (#3281) * Squash commit: enable kv separation for nebula change comment update comment lint * format * lint Co-authored-by: panda-sheep <59197347+panda-sheep@users.noreply.github.com> Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- conf/nebula-storaged.conf.default | 10 ++++ conf/nebula-storaged.conf.production | 10 ++++ src/kvstore/RocksEngineConfig.cpp | 70 ++++++++++++++++++---- src/kvstore/RocksEngineConfig.h | 4 ++ src/kvstore/test/RocksEngineConfigTest.cpp | 25 ++++++++ 5 files changed, 106 insertions(+), 13 deletions(-) diff --git a/conf/nebula-storaged.conf.default b/conf/nebula-storaged.conf.default index 4aead99ba67..59cd0f80cd9 100644 --- a/conf/nebula-storaged.conf.default +++ b/conf/nebula-storaged.conf.default @@ -95,6 +95,16 @@ # Whether or not to enable rocksdb's whole key bloom filter, disabled by default. 
--enable_rocksdb_whole_key_filtering=false +############## Key-Value separation ############## +# Whether or not to enable BlobDB (RocksDB key-value separation support) +--rocksdb_enable_kv_separation=false +# RocksDB key value separation threshold. Values at or above this threshold will be written to blob files during flush or compaction. +--rocksdb_kv_separation_threshold=0 +# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd +--rocksdb_blob_compression=lz4 +# Whether to garbage collect blobs during compaction +--rocksdb_enable_blob_garbage_collection=true + ############## rocksdb Options ############## # rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma --rocksdb_db_options={} diff --git a/conf/nebula-storaged.conf.production b/conf/nebula-storaged.conf.production index 8789ebdd08a..688fa36910e 100644 --- a/conf/nebula-storaged.conf.production +++ b/conf/nebula-storaged.conf.production @@ -101,6 +101,16 @@ # Whether or not to enable rocksdb's whole key bloom filter, disabled by default. --enable_rocksdb_whole_key_filtering=false +############## Key-Value separation ############## +# Whether or not to enable BlobDB (RocksDB key-value separation support) +--rocksdb_enable_kv_separation=false +# RocksDB key value separation threshold. Values at or above this threshold will be written to blob files during flush or compaction. 
+--rocksdb_kv_separation_threshold=0 +# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd +--rocksdb_blob_compression=lz4 +# Whether to garbage collect blobs during compaction +--rocksdb_enable_blob_garbage_collection=true + ############### misc #################### --snapshot_part_rate_limit=10485760 --snapshot_batch_size=1048576 diff --git a/src/kvstore/RocksEngineConfig.cpp b/src/kvstore/RocksEngineConfig.cpp index 701436c5d3f..28153769179 100644 --- a/src/kvstore/RocksEngineConfig.cpp +++ b/src/kvstore/RocksEngineConfig.cpp @@ -112,23 +112,43 @@ DEFINE_int32(rocksdb_backup_interval_secs, 300, "Rocksdb backup directory, only used in PlainTable format"); +DEFINE_bool(rocksdb_enable_kv_separation, + false, + "Whether or not to enable BlobDB (RocksDB key-value separation support)"); + +DEFINE_uint64(rocksdb_kv_separation_threshold, + 0, + "RocksDB key value separation threshold. Values at or above this threshold will be " + "written to blob files during flush or compaction." 
+ "This value is only effective when enable_kv_separation is true."); + +DEFINE_string(rocksdb_blob_compression, + "snappy", + "Compression algorithm for blobs, " + "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2"); + +DEFINE_bool(rocksdb_enable_blob_garbage_collection, + true, + "Set this to true to make BlobDB actively relocate valid blobs " + "from the oldest blob files as they are encountered during compaction"); + namespace nebula { namespace kvstore { -static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { - static std::unordered_map m = { - {"no", rocksdb::kNoCompression}, - {"snappy", rocksdb::kSnappyCompression}, - {"lz4", rocksdb::kLZ4Compression}, - {"lz4hc", rocksdb::kLZ4HCCompression}, - {"zstd", rocksdb::kZSTD}, - {"zlib", rocksdb::kZlibCompression}, - {"bzip2", rocksdb::kBZip2Compression}}; +static const std::unordered_map kCompressionTypeMap = { + {"no", rocksdb::kNoCompression}, + {"snappy", rocksdb::kSnappyCompression}, + {"lz4", rocksdb::kLZ4Compression}, + {"lz4hc", rocksdb::kLZ4HCCompression}, + {"zstd", rocksdb::kZSTD}, + {"zlib", rocksdb::kZlibCompression}, + {"bzip2", rocksdb::kBZip2Compression}}; +static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { // Set the general compression algorithm { - auto it = m.find(FLAGS_rocksdb_compression); - if (it == m.end()) { + auto it = kCompressionTypeMap.find(FLAGS_rocksdb_compression); + if (it == kCompressionTypeMap.end()) { LOG(ERROR) << "Unsupported compression type: " << FLAGS_rocksdb_compression; return rocksdb::Status::InvalidArgument(); } @@ -149,8 +169,8 @@ static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { if (compressions[i].empty()) { compressions[i] = FLAGS_rocksdb_compression; } - auto it = m.find(compressions[i]); - if (it == m.end()) { + auto it = kCompressionTypeMap.find(compressions[i]); + if (it == kCompressionTypeMap.end()) { LOG(ERROR) << "Unsupported compression type: " << compressions[i]; return 
rocksdb::Status::InvalidArgument(); } @@ -161,6 +181,25 @@ static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { return rocksdb::Status::OK(); } +static rocksdb::Status initRocksdbKVSeparation(rocksdb::Options& baseOpts) { + if (FLAGS_rocksdb_enable_kv_separation) { + baseOpts.enable_blob_files = true; + baseOpts.min_blob_size = FLAGS_rocksdb_kv_separation_threshold; + + // set blob compresstion algorithm + auto it = kCompressionTypeMap.find(FLAGS_rocksdb_blob_compression); + if (it == kCompressionTypeMap.end()) { + LOG(ERROR) << "Unsupported compression type: " << FLAGS_rocksdb_blob_compression; + return rocksdb::Status::InvalidArgument(); + } + baseOpts.blob_compression_type = it->second; + + // set blob gc + baseOpts.enable_blob_garbage_collection = FLAGS_rocksdb_enable_blob_garbage_collection; + } + return rocksdb::Status::OK(); +} + rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, GraphSpaceID spaceId, int32_t vidLen) { @@ -214,6 +253,11 @@ rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, return s; } + s = initRocksdbKVSeparation(baseOpts); + if (!s.ok()) { + return s; + } + if (FLAGS_num_compaction_threads > 0) { static std::shared_ptr compaction_thread_limiter{ rocksdb::NewConcurrentTaskLimiter("compaction", FLAGS_num_compaction_threads)}; diff --git a/src/kvstore/RocksEngineConfig.h b/src/kvstore/RocksEngineConfig.h index c4d3c396b7c..dcbd4d136d4 100644 --- a/src/kvstore/RocksEngineConfig.h +++ b/src/kvstore/RocksEngineConfig.h @@ -62,6 +62,10 @@ DECLARE_string(rocksdb_wal_dir); DECLARE_string(rocksdb_backup_dir); DECLARE_int32(rocksdb_backup_interval_secs); +// rocksdb key value separation options +DECLARE_bool(rocksdb_enable_kv_separation); +DECLARE_uint64(rocksdb_kv_separation_threshold); + namespace nebula { namespace kvstore { diff --git a/src/kvstore/test/RocksEngineConfigTest.cpp b/src/kvstore/test/RocksEngineConfigTest.cpp index 49acd862b72..c79b6cc8181 100644 --- 
a/src/kvstore/test/RocksEngineConfigTest.cpp +++ b/src/kvstore/test/RocksEngineConfigTest.cpp @@ -157,6 +157,31 @@ TEST(RocksEngineConfigTest, CompressionConfigTest) { } } +TEST(RocksEngineConfigTest, KeyValueSeparationTest) { + FLAGS_rocksdb_enable_kv_separation = true; + FLAGS_rocksdb_kv_separation_threshold = 10; + rocksdb::Options options; + auto status = initRocksdbOptions(options, 1); + ASSERT_TRUE(status.ok()) << status.ToString(); + + rocksdb::DB* db = nullptr; + SCOPE_EXIT { delete db; }; + options.create_if_missing = true; + fs::TempDir rootPath("/tmp/RocksDBCompressionConfigTest.XXXXXX"); + status = rocksdb::DB::Open(options, rootPath.path(), &db); + ASSERT_TRUE(status.ok()) << status.ToString(); + + std::string key = "test"; + std::string value = "This is a test value with value size greater than 10"; + status = db->Put(rocksdb::WriteOptions(), key, value); + ASSERT_TRUE(status.ok()) << status.ToString(); + + std::string read_value; + status = db->Get(rocksdb::ReadOptions(), key, &read_value); + ASSERT_TRUE(status.ok()) << status.ToString(); + ASSERT_EQ(value, read_value); +} + } // namespace kvstore } // namespace nebula From 29f52ae85db553ddaca6db0578dc7dad4510d94b Mon Sep 17 00:00:00 2001 From: "kyle.cao" Date: Tue, 23 Nov 2021 18:27:38 +0800 Subject: [PATCH 39/53] fix index existence check (#3315) * fix index existence check fix ut * small fix --- src/meta/processors/BaseProcessor-inl.h | 19 ++++------- src/meta/test/IndexProcessorTest.cpp | 28 ---------------- tests/tck/features/index/Index.feature | 43 +++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 40 deletions(-) diff --git a/src/meta/processors/BaseProcessor-inl.h b/src/meta/processors/BaseProcessor-inl.h index 5b09579bd73..dc9a2ec1bc2 100644 --- a/src/meta/processors/BaseProcessor-inl.h +++ b/src/meta/processors/BaseProcessor-inl.h @@ -548,22 +548,17 @@ nebula::cpp2::ErrorCode BaseProcessor::ftIndexCheck( template bool BaseProcessor::checkIndexExist(const std::vector& fields, 
const cpp2::IndexItem& item) { - if (fields.size() == 0) { - LOG(ERROR) << "Index " << item.get_index_name() << " has existed"; - return true; + const auto& itemFields = item.get_fields(); + if (fields.size() != itemFields.size()) { + return false; } - for (size_t i = 0; i < fields.size(); i++) { - if (fields[i].get_name() != item.get_fields()[i].get_name()) { - break; - } - - if (i == fields.size() - 1) { - LOG(ERROR) << "Index " << item.get_index_name() << " has existed"; - return true; + if (fields[i].get_name() != itemFields[i].get_name()) { + return false; } } - return false; + LOG(ERROR) << "Index " << item.get_index_name() << " has existed"; + return true; } template diff --git a/src/meta/test/IndexProcessorTest.cpp b/src/meta/test/IndexProcessorTest.cpp index 14aafd24ad9..57a4c5627e9 100644 --- a/src/meta/test/IndexProcessorTest.cpp +++ b/src/meta/test/IndexProcessorTest.cpp @@ -214,20 +214,6 @@ TEST(IndexProcessorTest, TagIndexTest) { auto resp = std::move(f).get(); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } - { - // Allow to create tag index on no fields - cpp2::CreateTagIndexReq req; - req.set_space_id(1); - req.set_tag_name("tag_0"); - std::vector fields{}; - req.set_fields(std::move(fields)); - req.set_index_name("no_field_index"); - auto* processor = CreateTagIndexProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_NE(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - } { cpp2::CreateTagIndexReq req; req.set_space_id(1); @@ -591,20 +577,6 @@ TEST(IndexProcessorTest, EdgeIndexTest) { auto resp = std::move(f).get(); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } - { - // Allow to create edge index on no fields - cpp2::CreateEdgeIndexReq req; - req.set_space_id(1); - req.set_edge_name("edge_0"); - std::vector fields{}; - req.set_fields(std::move(fields)); - req.set_index_name("no_field_index"); - auto* processor = 
CreateEdgeIndexProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_NE(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - } { cpp2::CreateEdgeIndexReq req; req.set_space_id(1); diff --git a/tests/tck/features/index/Index.feature b/tests/tck/features/index/Index.feature index 432dd06153c..12e7336994e 100644 --- a/tests/tck/features/index/Index.feature +++ b/tests/tck/features/index/Index.feature @@ -943,3 +943,46 @@ Feature: IndexTest_Vid_String Then the result should be, in any order: | Tag Index Name | Create Tag Index | | "player_age_index" | "CREATE TAG INDEX `player_age_index` ON `player` (\n `age`\n)" | + + Scenario: IndexTest existence check + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + create tag recinfo(name string,tm bool,id int); + insert vertex recinfo(name,tm,id) values "r1":("czp",true,1); + create tag index recinfo_index on recinfo(); + create tag index recinfo_name_index on recinfo(name(8)); + create tag index recinfo_multi_index on recinfo(name(8),tm,id); + """ + When executing query: + """ + drop tag index recinfo_name_index + """ + Then the execution should be successful + When executing query: + """ + create tag index recinfo_name_index on recinfo(name(8)); + """ + Then the execution should be successful + When executing query: + """ + create tag index recinfo_index on recinfo(); + """ + Then a ExecutionError should be raised at runtime: Existed! 
+ When executing query: + """ + drop tag index recinfo_index + """ + Then the execution should be successful + When executing query: + """ + create tag index recinfo_index on recinfo(); + """ + Then the execution should be successful + Then drop the used space From e8b45d339ed5fea3616e20b73cde964dd72042fd Mon Sep 17 00:00:00 2001 From: "kyle.cao" Date: Tue, 23 Nov 2021 19:50:59 +0800 Subject: [PATCH 40/53] fix index degeneration (#3311) --- src/graph/optimizer/rule/IndexScanRule.cpp | 14 ++- .../tck/features/match/IndexSelecting.feature | 99 +++++++++++++++++++ 2 files changed, 105 insertions(+), 8 deletions(-) create mode 100644 tests/tck/features/match/IndexSelecting.feature diff --git a/src/graph/optimizer/rule/IndexScanRule.cpp b/src/graph/optimizer/rule/IndexScanRule.cpp index a7e4ad3601a..a19f0554e22 100644 --- a/src/graph/optimizer/rule/IndexScanRule.cpp +++ b/src/graph/optimizer/rule/IndexScanRule.cpp @@ -64,7 +64,10 @@ StatusOr IndexScanRule::transform(OptContext* ctx, FilterItems items; ScanKind kind; NG_RETURN_IF_ERROR(analyzeExpression(filter, &items, &kind, isEdge(groupNode))); - NG_RETURN_IF_ERROR(createIndexQueryCtx(iqctx, kind, items, qctx, groupNode)); + auto status = createIndexQueryCtx(iqctx, kind, items, qctx, groupNode); + if (!status.ok()) { + NG_RETURN_IF_ERROR(createIndexQueryCtx(iqctx, qctx, groupNode)); + } } const auto* oldIN = groupNode->node(); @@ -479,20 +482,15 @@ std::vector IndexScanRule::findValidIndex(graph::QueryContext* qctx, std::vector validIndexes; // Find indexes for match all fields by where condition. 
for (const auto& index : indexes) { - bool allColsHint = true; const auto& fields = index->get_fields(); for (const auto& item : items.items) { auto it = std::find_if(fields.begin(), fields.end(), [item](const auto& field) { return field.get_name() == item.col_; }); - if (it == fields.end()) { - allColsHint = false; - break; + if (it != fields.end()) { + validIndexes.emplace_back(index); } } - if (allColsHint) { - validIndexes.emplace_back(index); - } } // If the first field of the index does not match any condition, the index is // invalid. remove it from validIndexes. diff --git a/tests/tck/features/match/IndexSelecting.feature b/tests/tck/features/match/IndexSelecting.feature new file mode 100644 index 00000000000..4744fd7ec6c --- /dev/null +++ b/tests/tck/features/match/IndexSelecting.feature @@ -0,0 +1,99 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License. +Feature: Index selecting for match statement + + Background: Prepare a new space + Given an empty graph + And create a space with following options: + | partition_num | 9 | + | replica_factor | 1 | + | vid_type | FIXED_STRING(30) | + | charset | utf8 | + | collate | utf8_bin | + And having executed: + """ + CREATE tag player(name string, age int, score int, gender bool); + """ + And having executed: + """ + INSERT VERTEX player(name, age, score, gender) VALUES "Tim Duncan":("Tim Duncan", 42, 28, true),"Yao Ming":("Yao Ming", 38, 23, true),"Nneka Ogwumike":("Nneka Ogwumike", 35, 13, false); + """ + And having executed: + """ + create tag index player_index on player(); + create tag index player_name_index on player(name(8)); + create tag index player_age_name_index on player(age,name(8)); + """ + And wait 3 seconds + + Scenario: Test Index selecting + When submit a job: + """ + rebuild tag index player_index; + """ + Then wait the job to finish + When submit a job: + """ + rebuild tag index player_name_index; + """ + Then wait the job to 
finish + When submit a job: + """ + rebuild tag index player_age_name_index; + """ + Then wait the job to finish + # Prefix Index + When profiling query: + """ + MATCH (v:player {name: "Yao Ming"}) RETURN v.name AS name + """ + Then the result should be, in any order, with relax comparison: + | name | + | "Yao Ming" | + And the execution plan should be: + | id | name | dependencies | operator info | + | 6 | Project | 2 | | + | 2 | AppendVertices | 5 | | + | 5 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"PREFIX"}}} | + | 0 | Start | | | + # Range Index + When profiling query: + """ + MATCH (v:player) WHERE v.name > "Tim" and v.name < "Zom" RETURN v.name AS name + """ + Then the result should be, in any order, with relax comparison: + | name | + | "Yao Ming" | + | "Tim Duncan" | + And the execution plan should be: + | id | name | dependencies | operator info | + | 9 | Project | 7 | | + | 7 | Filter | 2 | | + | 2 | AppendVertices | 6 | | + | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"RANGE"}}} | + | 0 | Start | | | + # Degeneration to FullScan Index + When executing query: + """ + MATCH (v:player) WHERE v.score < 20 RETURN v.name AS name + """ + Then the result should be, in any order, with relax comparison: + | name | + | "Nneka Ogwumike" | + # Degeneration to Prefix Index + When profiling query: + """ + MATCH (v:player) WHERE v.name == "Tim Duncan" and v.score == 28 RETURN v.name AS name + """ + Then the result should be, in any order, with relax comparison: + | name | + | "Tim Duncan" | + And the execution plan should be: + | id | name | dependencies | operator info | + | 9 | Project | 7 | | + | 7 | Filter | 2 | | + | 2 | AppendVertices | 6 | | + | 6 | IndexScan | 0 | {"indexCtx": {"columnHints":{"scanType":"PREFIX"}}} | + | 0 | Start | | | + Then drop the used space From 65414383489da4df8bce8e06406996636b79decd Mon Sep 17 00:00:00 2001 From: jimingquan Date: Tue, 23 Nov 2021 23:41:55 +0800 Subject: [PATCH 41/53] Lookup subgraph 
format yield (#3139) * format lookup * fix lookup ci test * fix geo testcase * fix lookup ci test * disable missing yield clause in subgraph sentence * fix subgraph ci test * fix test error --- src/graph/validator/GetSubgraphValidator.cpp | 19 +- src/graph/validator/LookupValidator.cpp | 29 +- .../test/GetSubgraphValidatorTest.cpp | 39 +- .../validator/test/LookupValidatorTest.cpp | 41 +- .../bugfix/TruncatedStringIndex.feature | 6 +- .../features/delete/DeleteTag.IntVid.feature | 40 +- tests/tck/features/delete/DeleteTag.feature | 40 +- tests/tck/features/geo/GeoBase.feature | 266 +++--- tests/tck/features/index/Index.IntVid.feature | 8 +- tests/tck/features/index/Index.feature | 72 +- tests/tck/features/index/TagEdgeIndex.feature | 28 +- tests/tck/features/insert/Insert.feature | 10 +- tests/tck/features/lookup/ByIndex.feature | 764 ++++++++-------- .../features/lookup/ByIndex.intVid.feature | 714 +++++++-------- .../features/lookup/EdgeIndexFullScan.feature | 110 +-- .../tck/features/lookup/LookUp.IntVid.feature | 442 ++++----- tests/tck/features/lookup/LookUp.feature | 558 +++++------ tests/tck/features/lookup/LookUpLimit.feature | 52 +- tests/tck/features/lookup/LookupEdge.feature | 20 +- tests/tck/features/lookup/LookupEdge2.feature | 14 +- tests/tck/features/lookup/LookupTag.feature | 20 +- tests/tck/features/lookup/LookupTag2.feature | 24 +- tests/tck/features/lookup/Output.feature | 24 +- .../tck/features/lookup/Output.intVid.feature | 32 +- .../features/lookup/TagIndexFullScan.feature | 82 +- tests/tck/features/lookup/WithYield.feature | 64 +- .../features/lookup/WithYield.intVid.feature | 82 +- .../optimizer/CollapseProjectRule.feature | 5 +- .../PushFilterDownLeftJoinRule.feature | 4 +- .../tck/features/schema/CreateSpaceAs.feature | 24 +- .../features/subgraph/subgraph.IntVid.feature | 865 +----------------- tests/tck/features/subgraph/subgraph.feature | 639 ++----------- 32 files changed, 1882 insertions(+), 3255 deletions(-) diff --git 
a/src/graph/validator/GetSubgraphValidator.cpp b/src/graph/validator/GetSubgraphValidator.cpp index c0d9f4a5576..e69a5ae582c 100644 --- a/src/graph/validator/GetSubgraphValidator.cpp +++ b/src/graph/validator/GetSubgraphValidator.cpp @@ -94,30 +94,21 @@ Status GetSubgraphValidator::validateBothInOutBound(BothInOutClause* out) { } Status GetSubgraphValidator::validateYield(YieldClause* yield) { - auto pool = qctx_->objPool(); if (yield == nullptr) { - // version 3.0: return Status::SemanticError("No Yield Clause"); - auto* yieldColumns = new YieldColumns(); - auto* vertex = new YieldColumn(LabelExpression::make(pool, "_vertices")); - yieldColumns->addColumn(vertex); - if (subgraphCtx_->steps.steps() != 0) { - auto* edge = new YieldColumn(LabelExpression::make(pool, "_edges")); - yieldColumns->addColumn(edge); - } - yield = pool->add(new YieldClause(yieldColumns)); + return Status::SemanticError("Missing yield clause."); } auto size = yield->columns().size(); outputs_.reserve(size); + auto pool = qctx_->objPool(); YieldColumns* newCols = pool->add(new YieldColumns()); for (const auto& col : yield->columns()) { - std::string lowerStr = col->expr()->toString(); - folly::toLowerAscii(lowerStr); - if (lowerStr == "vertices" || lowerStr == "_vertices") { + const std::string& colStr = col->expr()->toString(); + if (colStr == "VERTICES") { subgraphCtx_->getVertexProp = true; auto* newCol = new YieldColumn(InputPropertyExpression::make(pool, "VERTICES"), col->name()); newCols->addColumn(newCol); - } else if (lowerStr == "edges" || lowerStr == "_edges") { + } else if (colStr == "EDGES") { if (subgraphCtx_->steps.steps() == 0) { return Status::SemanticError("Get Subgraph 0 STEPS only support YIELD vertices"); } diff --git a/src/graph/validator/LookupValidator.cpp b/src/graph/validator/LookupValidator.cpp index d5d18e7eb4a..6426b8975fc 100644 --- a/src/graph/validator/LookupValidator.cpp +++ b/src/graph/validator/LookupValidator.cpp @@ -160,36 +160,21 @@ Status 
LookupValidator::validateYieldTag() { } Status LookupValidator::validateYield() { - auto pool = qctx_->objPool(); - auto* newCols = pool->add(new YieldColumns()); - lookupCtx_->yieldExpr = newCols; - if (lookupCtx_->isEdge) { - idxReturnCols_.emplace_back(kSrc); - idxReturnCols_.emplace_back(kDst); - idxReturnCols_.emplace_back(kRank); - idxReturnCols_.emplace_back(kType); - outputs_.emplace_back(kSrcVID, vidType_); - outputs_.emplace_back(kDstVID, vidType_); - outputs_.emplace_back(kRanking, Value::Type::INT); - newCols->addColumn(new YieldColumn(ColumnExpression::make(pool, 0), kSrcVID)); - newCols->addColumn(new YieldColumn(ColumnExpression::make(pool, 1), kDstVID)); - newCols->addColumn(new YieldColumn(ColumnExpression::make(pool, 2), kRanking)); - } else { - idxReturnCols_.emplace_back(kVid); - outputs_.emplace_back(kVertexID, vidType_); - newCols->addColumn(new YieldColumn(ColumnExpression::make(pool, 0), kVertexID)); - } - auto yieldClause = sentence()->yieldClause(); if (yieldClause == nullptr) { - extractExprProps(); - return Status::OK(); + return Status::SemanticError("Missing yield clause."); } lookupCtx_->dedup = yieldClause->isDistinct(); + lookupCtx_->yieldExpr = qctx_->objPool()->add(new YieldColumns()); if (lookupCtx_->isEdge) { + idxReturnCols_.emplace_back(nebula::kSrc); + idxReturnCols_.emplace_back(nebula::kDst); + idxReturnCols_.emplace_back(nebula::kRank); + idxReturnCols_.emplace_back(nebula::kType); NG_RETURN_IF_ERROR(validateYieldEdge()); } else { + idxReturnCols_.emplace_back(nebula::kVid); NG_RETURN_IF_ERROR(validateYieldTag()); } if (exprProps_.hasInputVarProperty()) { diff --git a/src/graph/validator/test/GetSubgraphValidatorTest.cpp b/src/graph/validator/test/GetSubgraphValidatorTest.cpp index 61ef6d9814e..46d86e1a36e 100644 --- a/src/graph/validator/test/GetSubgraphValidatorTest.cpp +++ b/src/graph/validator/test/GetSubgraphValidatorTest.cpp @@ -19,7 +19,7 @@ using PK = nebula::graph::PlanNode::Kind; TEST_F(GetSubgraphValidatorTest, 
Base) { { - std::string query = "GET SUBGRAPH FROM \"1\""; + std::string query = "GET SUBGRAPH FROM \"1\" YIELD vertices as nodes"; std::vector expected = { PK::kProject, PK::kDataCollect, @@ -32,7 +32,7 @@ TEST_F(GetSubgraphValidatorTest, Base) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "GET SUBGRAPH WITH PROP 3 STEPS FROM \"1\""; + std::string query = "GET SUBGRAPH WITH PROP 3 STEPS FROM \"1\" YIELD edges as relationships"; std::vector expected = { PK::kProject, PK::kDataCollect, @@ -58,7 +58,8 @@ TEST_F(GetSubgraphValidatorTest, Base) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "GET SUBGRAPH WITH PROP FROM \"1\", \"2\" IN like"; + std::string query = + "GET SUBGRAPH WITH PROP FROM \"1\", \"2\" IN like YIELD vertices as a, edges as b"; std::vector expected = { PK::kProject, PK::kDataCollect, @@ -76,7 +77,7 @@ TEST_F(GetSubgraphValidatorTest, Input) { { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src | GET SUBGRAPH WITH " - "PROP FROM $-.src"; + "PROP FROM $-.src YIELD vertices as a, edges as b"; std::vector expected = { PK::kProject, PK::kDataCollect, @@ -95,7 +96,7 @@ TEST_F(GetSubgraphValidatorTest, Input) { { std::string query = "$a = GO FROM \"1\" OVER like YIELD like._src AS src; GET SUBGRAPH " - "FROM $a.src"; + "FROM $a.src YIELD vertices as a, edges as b"; std::vector expected = { PK::kProject, PK::kDataCollect, @@ -112,7 +113,7 @@ TEST_F(GetSubgraphValidatorTest, Input) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "GET SUBGRAPH 0 STEPS FROM \"1\""; + std::string query = "GET SUBGRAPH 0 STEPS FROM \"1\" YIELD vertices as nodes"; std::vector expected = { PK::kAggregate, PK::kGetVertices, @@ -121,7 +122,8 @@ TEST_F(GetSubgraphValidatorTest, Input) { EXPECT_TRUE(checkResult(query, expected)); } { - std::string query = "GET SUBGRAPH WITH PROP 0 STEPS FROM \"1\", \"2\", \"3\""; + std::string query = + "GET SUBGRAPH WITH PROP 0 STEPS FROM \"1\", \"2\", \"3\" 
YIELD vertices as nodes"; std::vector expected = { PK::kAggregate, PK::kGetVertices, @@ -132,7 +134,7 @@ TEST_F(GetSubgraphValidatorTest, Input) { { std::string query = "GO FROM \"1\" OVER like YIELD like._src AS src | GET SUBGRAPH WITH " - "PROP 0 STEPS FROM $-.src"; + "PROP 0 STEPS FROM $-.src YIELD vertices as nodes"; std::vector expected = { PK::kAggregate, PK::kGetVertices, @@ -147,7 +149,7 @@ TEST_F(GetSubgraphValidatorTest, Input) { { std::string query = "$a = GO FROM \"1\" OVER like YIELD like._src AS src; GET SUBGRAPH " - "WITH PROP 0 STEPS FROM $a.src"; + "WITH PROP 0 STEPS FROM $a.src YIELD vertices as nodes"; std::vector expected = { PK::kAggregate, PK::kGetVertices, @@ -162,6 +164,11 @@ TEST_F(GetSubgraphValidatorTest, Input) { } TEST_F(GetSubgraphValidatorTest, invalidYield) { + { + std::string query = "GET SUBGRAPH WITH PROP FROM \"Tim Duncan\""; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), "SemanticError: Missing yield clause."); + } { std::string query = "GET SUBGRAPH WITH PROP FROM \"Tim Duncan\" YIELD vertice"; auto result = checkResult(query); @@ -202,19 +209,19 @@ TEST_F(GetSubgraphValidatorTest, invalidYield) { TEST_F(GetSubgraphValidatorTest, RefNotExist) { { - std::string query = "GET SUBGRAPH WITH PROP FROM $-.id"; + std::string query = "GET SUBGRAPH WITH PROP FROM $-.id YIELD edges as b"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: `$-.id', not exist prop `id'"); } { - std::string query = "GET SUBGRAPH WITH PROP FROM $a.id"; + std::string query = "GET SUBGRAPH WITH PROP FROM $a.id YIELD edges as b"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: `$a.id', not exist variable `a'"); } { std::string query = "GO FROM \"1\" OVER like YIELD $$.person.age AS id | GET SUBGRAPH WITH " - "PROP FROM $-.id"; + "PROP FROM $-.id YIELD vertices as nodes"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), 
"SemanticError: `$-.id', the srcs should be type of " @@ -223,7 +230,7 @@ TEST_F(GetSubgraphValidatorTest, RefNotExist) { { std::string query = "$a = GO FROM \"1\" OVER like YIELD $$.person.age AS ID; GET SUBGRAPH " - "FROM $a.ID"; + "FROM $a.ID YIELD edges as relationships"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: `$a.ID', the srcs should be type of " @@ -232,21 +239,21 @@ TEST_F(GetSubgraphValidatorTest, RefNotExist) { { std::string query = "$a = GO FROM \"1\" OVER like YIELD like._src AS src; GET SUBGRAPH " - "WITH PROP FROM $b.src"; + "WITH PROP FROM $b.src YIELD vertices as nodes"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: `$b.src', not exist variable `b'"); } { std::string query = "GO FROM \"1\" OVER like YIELD like._dst AS id, like._src AS id | GET " - "SUBGRAPH FROM $-.id"; + "SUBGRAPH FROM $-.id YIELD edges as relationships"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: Duplicate Column Name : `id'"); } { std::string query = "$a = GO FROM \"1\" OVER like YIELD like._dst AS id, like._src AS id; " - "GET SUBGRAPH WITH PROP FROM $a.id"; + "GET SUBGRAPH WITH PROP FROM $a.id YIELD vertices as nodes"; auto result = checkResult(query); EXPECT_EQ(std::string(result.message()), "SemanticError: Duplicate Column Name : `id'"); } diff --git a/src/graph/validator/test/LookupValidatorTest.cpp b/src/graph/validator/test/LookupValidatorTest.cpp index 397b87bcb13..239deb2e261 100644 --- a/src/graph/validator/test/LookupValidatorTest.cpp +++ b/src/graph/validator/test/LookupValidatorTest.cpp @@ -18,8 +18,8 @@ TEST_F(LookupValidatorTest, InputOutput) { // pipe { const std::string query = - "LOOKUP ON person where person.age == 35 | " - "FETCH PROP ON person $-.VertexID YIELD vertex as node"; + "LOOKUP ON person where person.age == 35 YIELD id(vertex) as id | " + "FETCH PROP ON person $-.id YIELD vertex as node"; 
EXPECT_TRUE(checkResult(query, { PlanNode::Kind::kProject, @@ -48,8 +48,8 @@ TEST_F(LookupValidatorTest, InputOutput) { // variable { const std::string query = - "$a = LOOKUP ON person where person.age == 35; " - "FETCH PROP ON person $a.VertexID YIELD vertex as node"; + "$a = LOOKUP ON person where person.age == 35 YIELD id(vertex) as id; " + "FETCH PROP ON person $a.id YIELD vertex as node"; EXPECT_TRUE(checkResult(query, { PlanNode::Kind::kProject, @@ -63,8 +63,7 @@ TEST_F(LookupValidatorTest, InputOutput) { // var with yield { const std::string query = - "$a = LOOKUP ON person where person.age == 35 YIELD person.name AS " - "name;" + "$a = LOOKUP ON person where person.age == 35 YIELD person.name AS name;" "FETCH PROP ON person $a.name YIELD vertex as node"; EXPECT_TRUE(checkResult(query, { @@ -79,7 +78,10 @@ TEST_F(LookupValidatorTest, InputOutput) { } TEST_F(LookupValidatorTest, InvalidYieldExpression) { - // TODO(shylock) + { + const std::string query = "LOOKUP ON person where person.age > 20;"; + EXPECT_FALSE(checkResult(query, {})); + } { const std::string query = "LOOKUP ON person where person.age == 35 YIELD person.age + 1 AS age;"; @@ -95,40 +97,49 @@ TEST_F(LookupValidatorTest, InvalidYieldExpression) { TEST_F(LookupValidatorTest, InvalidFilterExpression) { { - const std::string query = "LOOKUP ON person where person.age == person.name;"; + const std::string query = + "LOOKUP ON person where person.age == person.name YIELD vertex as node;"; EXPECT_FALSE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where person.age > person.name;"; + const std::string query = + "LOOKUP ON person where person.age > person.name YIELD vertex as node;"; EXPECT_FALSE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where person.age != person.name;"; + const std::string query = + "LOOKUP ON person where person.age != person.name YIELD vertex as node;"; EXPECT_FALSE(checkResult(query, {})); } { - const std::string query = 
"LOOKUP ON person where person.age + 1 > 5;"; + const std::string query = "LOOKUP ON person where person.age + 1 > 5 YIELD person.age;"; EXPECT_FALSE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where person.age > person.name + 5;"; + const std::string query = + "LOOKUP ON person where person.age > person.name + 5 YIELD id(vertex);"; EXPECT_FALSE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where 1 + 5 < person.age;"; + const std::string query = "LOOKUP ON person where 1 + 5 < person.age YIELD vertex as node;"; EXPECT_TRUE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where person.age > 1 + 5;"; + const std::string query = "LOOKUP ON person where person.age > 1 + 5 YIELD vertex as node;"; EXPECT_TRUE(checkResult(query, {})); } { - const std::string query = "LOOKUP ON person where person.age > abs(-5);"; + const std::string query = "LOOKUP ON person where person.age > abs(-5) YIELD id(vertex);"; EXPECT_TRUE(checkResult(query, {})); } } TEST_F(LookupValidatorTest, wrongYield) { + { + std::string query = "LOOKUP ON person"; + auto result = checkResult(query); + EXPECT_EQ(std::string(result.message()), "SemanticError: Missing yield clause."); + } { std::string query = "LOOKUP ON person YIELD vertex"; auto result = checkResult(query); diff --git a/tests/tck/features/bugfix/TruncatedStringIndex.feature b/tests/tck/features/bugfix/TruncatedStringIndex.feature index f1f30ecccc7..149d775e4ff 100644 --- a/tests/tck/features/bugfix/TruncatedStringIndex.feature +++ b/tests/tck/features/bugfix/TruncatedStringIndex.feature @@ -25,16 +25,16 @@ Feature: Truncated string index Then the execution should be successful When executing query: """ - LOOKUP ON person WHERE person.name=="abc" + LOOKUP ON person WHERE person.name=="abc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ LOOKUP ON person WHERE person.name=="abc" YIELD 
person.name """ Then the result should be, in any order: - | VertexID | person.name | + | person.name | When executing query: """ match (v:person) where v.name == "abc" return v; diff --git a/tests/tck/features/delete/DeleteTag.IntVid.feature b/tests/tck/features/delete/DeleteTag.IntVid.feature index 2eb8c0df69a..af279dd4701 100644 --- a/tests/tck/features/delete/DeleteTag.IntVid.feature +++ b/tests/tck/features/delete/DeleteTag.IntVid.feature @@ -23,10 +23,10 @@ Feature: Delete int vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -50,10 +50,10 @@ Feature: Delete int vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | Then drop the used space Scenario: delete int vid one vertex multiple tag @@ -76,10 +76,10 @@ Feature: Delete int vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -102,10 +102,10 @@ Feature: Delete int vid of tag | bachelor.name | bachelor.speciality | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | 
VertexID | + | id | Then drop the used space Scenario: delete int vid one vertex all tag @@ -128,10 +128,10 @@ Feature: Delete int vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -154,10 +154,10 @@ Feature: Delete int vid of tag | bachelor.name | bachelor.speciality | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | Then drop the used space Scenario: delete int vid multiple vertex one tag @@ -180,17 +180,17 @@ Feature: Delete int vid of tag | "Tony Parker" | 36 | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | | "Tim Duncan" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tony Parker" + LOOKUP ON player WHERE player.name == "Tony Parker" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | | "Tony Parker" | # delete one tag When executing query: @@ -213,16 +213,16 @@ Feature: Delete int vid of tag | player.name | player.age | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | When executing query: """ - LOOKUP ON player WHERE player.name == 
"Tony Parker" + LOOKUP ON player WHERE player.name == "Tony Parker" YIELD id(vertex) as id """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | id | Then drop the used space Scenario: delete int vid from pipe diff --git a/tests/tck/features/delete/DeleteTag.feature b/tests/tck/features/delete/DeleteTag.feature index e2ac581883c..4e01b0cdeff 100644 --- a/tests/tck/features/delete/DeleteTag.feature +++ b/tests/tck/features/delete/DeleteTag.feature @@ -23,10 +23,10 @@ Feature: Delete string vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -50,10 +50,10 @@ Feature: Delete string vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: delete string vid one vertex multiple tag @@ -76,10 +76,10 @@ Feature: Delete string vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -102,10 +102,10 @@ Feature: Delete string vid of tag | bachelor.name | bachelor.speciality | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: delete string vid one vertex 
all tag @@ -128,10 +128,10 @@ Feature: Delete string vid of tag | "Tim Duncan" | "psychology" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Tim Duncan" | # delete one tag When executing query: @@ -154,10 +154,10 @@ Feature: Delete string vid of tag | bachelor.name | bachelor.speciality | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: delete string vid multiple vertex one tag @@ -180,17 +180,17 @@ Feature: Delete string vid of tag | "Tony Parker" | 36 | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Tim Duncan" | When executing query: """ - LOOKUP ON player WHERE player.name == "Tony Parker" + LOOKUP ON player WHERE player.name == "Tony Parker" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Tony Parker" | # delete one tag When executing query: @@ -213,16 +213,16 @@ Feature: Delete string vid of tag | player.name | player.age | When executing query: """ - LOOKUP ON player WHERE player.name == "Tim Duncan" + LOOKUP ON player WHERE player.name == "Tim Duncan" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON player WHERE player.name == "Tony Parker" + LOOKUP ON player WHERE player.name == "Tony Parker" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: delete string vid from pipe diff --git 
a/tests/tck/features/geo/GeoBase.feature b/tests/tck/features/geo/GeoBase.feature index 24983b3e051..31705b7e9ca 100644 --- a/tests/tck/features/geo/GeoBase.feature +++ b/tests/tck/features/geo/GeoBase.feature @@ -276,10 +276,10 @@ Feature: Geo base LOOKUP ON any_shape YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | And the execution plan should be: | id | name | dependencies | operator info | | 2 | Project | 3 | | @@ -290,29 +290,29 @@ Feature: Geo base LOOKUP ON only_point YIELD ST_ASText(only_point.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_point.geo) | - | "201" | "POINT(3 8)" | + | ST_ASText(only_point.geo) | + | "POINT(3 8)" | When executing query: """ LOOKUP ON only_linestring YIELD ST_ASText(only_linestring.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_linestring.geo) | - | "302" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(only_linestring.geo) | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON only_polygon YIELD ST_ASText(only_polygon.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_polygon.geo) | - | "403" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(only_polygon.geo) | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ LOOKUP ON any_shape_edge YIELD ST_ASText(any_shape_edge.geo); """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | ST_ASText(any_shape_edge.geo) | - | "201" | "302" | 0 | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape_edge.geo) | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | # Match with geo index When executing query: """ @@ -352,77 +352,77 @@ Feature: Geo base # 
Lookup on geo index again When executing query: """ - LOOKUP ON any_shape YIELD ST_ASText(any_shape.geo); + LOOKUP ON any_shape YIELD id(vertex) as id, ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | id | ST_ASText(any_shape.geo) | + | "101" | "POINT(3 8)" | + | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "108" | "POINT(72.3 84.6)" | When executing query: """ - LOOKUP ON only_point YIELD ST_ASText(only_point.geo); + LOOKUP ON only_point YIELD id(vertex) as id, ST_ASText(only_point.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_point.geo) | - | "201" | "POINT(3 8)" | - | "208" | "POINT(0.01 0.01)" | + | id | ST_ASText(only_point.geo) | + | "201" | "POINT(3 8)" | + | "208" | "POINT(0.01 0.01)" | When executing query: """ - LOOKUP ON only_linestring YIELD ST_ASText(only_linestring.geo); + LOOKUP ON only_linestring YIELD id(vertex) as id, ST_ASText(only_linestring.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_linestring.geo) | - | "302" | "LINESTRING(3 8, 4.7 73.23)" | - | "308" | "LINESTRING(9 9, 8 8, 7 7, 9 9)" | + | id | ST_ASText(only_linestring.geo) | + | "302" | "LINESTRING(3 8, 4.7 73.23)" | + | "308" | "LINESTRING(9 9, 8 8, 7 7, 9 9)" | When executing query: """ - LOOKUP ON only_polygon YIELD ST_ASText(only_polygon.geo); + LOOKUP ON only_polygon YIELD id(vertex) as id, ST_ASText(only_polygon.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(only_polygon.geo) | - | "403" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "408" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | id | ST_ASText(only_polygon.geo) | + | "403" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "408" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ - 
LOOKUP ON any_shape_edge YIELD ST_ASText(any_shape_edge.geo); + LOOKUP ON any_shape_edge YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, ST_ASText(any_shape_edge.geo); """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | ST_ASText(any_shape_edge.geo) | - | "108" | "408" | 0 | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | - | "201" | "302" | 0 | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | src | dst | rank | ST_ASText(any_shape_edge.geo) | + | "108" | "408" | 0 | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | + | "201" | "302" | 0 | "POLYGON((0 1, 1 2, 2 3, 0 1))" | # Lookup and Yield geo functions When executing query: """ - LOOKUP ON any_shape YIELD S2_CellIdFromPoint(any_shape.geo); + LOOKUP ON any_shape YIELD id(vertex) as id, S2_CellIdFromPoint(any_shape.geo); """ Then the result should be, in any order: - | VertexID | S2_CellIdFromPoint(any_shape.geo) | - | "101" | 1166542697063163289 | - | "102" | BAD_DATA | - | "103" | BAD_DATA | - | "108" | 4987215245349669805 | + | id | S2_CellIdFromPoint(any_shape.geo) | + | "101" | 1166542697063163289 | + | "102" | BAD_DATA | + | "103" | BAD_DATA | + | "108" | 4987215245349669805 | When executing query: """ - LOOKUP ON any_shape YIELD S2_CoveringCellIds(any_shape.geo); + LOOKUP ON any_shape YIELD id(vertex) as id, S2_CoveringCellIds(any_shape.geo); """ Then the result should be, in any order: - | VertexID | S2_CoveringCellIds(any_shape.geo) | - | "101" | [1166542697063163289] | - | "102" | [1167558203395801088, 1279022294173220864, 1315051091192184832, 1351079888211148800, 5039527983027585024, 5062045981164437504, 5174635971848699904, 5183643171103440896] | - | "103" | [1152391494368201343, 1153466862374223872, 1153554823304445952, 1153836298281156608, 1153959443583467520, 1154240918560178176, 1160503736791990272, 1160591697722212352] | - | "108" | [4987215245349669805] | + | id | S2_CoveringCellIds(any_shape.geo) | + | "101" | 
[1166542697063163289] | + | "102" | [1167558203395801088, 1279022294173220864, 1315051091192184832, 1351079888211148800, 5039527983027585024, 5062045981164437504, 5174635971848699904, 5183643171103440896] | + | "103" | [1152391494368201343, 1153466862374223872, 1153554823304445952, 1153836298281156608, 1153959443583467520, 1154240918560178176, 1160503736791990272, 1160591697722212352] | + | "108" | [4987215245349669805] | # Lookup with geo predicates which could be index accelerated # ST_Intersects When profiling query: """ - LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, ST_GeogFromText('POINT(3 8)')) YIELD ST_ASText(any_shape.geo); + LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, ST_GeogFromText('POINT(3 8)')) YIELD id(vertex) as id, ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | id | ST_ASText(any_shape.geo) | + | "101" | "POINT(3 8)" | + | "102" | "LINESTRING(3 8, 4.7 73.23)" | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -433,29 +433,29 @@ Feature: Geo base LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, ST_GeogFromText('POINT(0 1)')) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, ST_GeogFromText('POINT(4.7 73.23)')) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, ST_Point(72.3, 84.6)) YIELD ST_ASText(any_shape.geo); """ 
Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Intersects(ST_Point(72.3, 84.6), any_shape.geo) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Intersects(any_shape.geo, any_shape.geo) YIELD ST_ASText(any_shape.geo); @@ -486,65 +486,65 @@ Feature: Geo base LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, ST_Point(3, 8)) < 1.0 YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, ST_Point(3, 8)) <= 1.0 YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, ST_Point(3, 8)) <= 8909524.383934561 YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, ST_Point(3, 
8)) < 8909524.383934561 YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, ST_Point(3, 8)) < 8909524.383934563 YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE 8909524.383934560 > ST_Distance(any_shape.geo, ST_Point(3, 8)) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ LOOKUP ON any_shape WHERE 8909524.3839345630 >= ST_Distance(any_shape.geo, ST_Point(3, 8)) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Distance(any_shape.geo, 
ST_Point(3, 8)) > 1.0 YIELD ST_ASText(any_shape.geo); @@ -561,58 +561,58 @@ Feature: Geo base LOOKUP ON any_shape WHERE ST_DWithin(any_shape.geo, ST_Point(3, 8), 8909524.383934561) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_DWithin(any_shape.geo, ST_Point(3, 8), 100.0) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_DWithin(any_shape.geo, ST_Point(3, 8), 100) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | # ST_Covers When executing query: """ LOOKUP ON any_shape WHERE ST_Covers(any_shape.geo, ST_Point(3, 8)) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Covers(any_shape.geo, ST_Point(3, 8)) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "POINT(3 8)" | - | "102" | "LINESTRING(3 8, 4.7 
73.23)" | + | ST_ASText(any_shape.geo) | + | "POINT(3 8)" | + | "LINESTRING(3 8, 4.7 73.23)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Covers(ST_GeogFromText('POLYGON((-0.7 3.8,3.6 3.2,1.8 -0.8,-3.4 2.4,-0.7 3.8))'), any_shape.geo) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | When executing query: """ LOOKUP ON any_shape WHERE ST_CoveredBy(any_shape.geo, ST_GeogFromText('POLYGON((-0.7 3.8,3.6 3.2,1.8 -0.8,-3.4 2.4,-0.7 3.8))')) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | ST_ASText(any_shape.geo) | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | # Update vertex with index When executing query: """ @@ -631,19 +631,19 @@ Feature: Geo base LOOKUP ON any_shape YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "LINESTRING(3 8, 6 16)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "LINESTRING(3 8, 6 16)" | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_DWithin(any_shape.geo, ST_Point(3, 8), 100.0) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "101" | "LINESTRING(3 8, 6 16)" | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "LINESTRING(3 8, 6 16)" | + | "LINESTRING(3 8, 4.7 73.23)" | # Update edge with index When executing query: """ @@ -662,17 +662,17 @@ Feature: Geo base LOOKUP ON any_shape_edge YIELD ST_ASText(any_shape_edge.geo); """ Then the result should 
be, in any order: - | SrcVID | DstVID | Ranking | ST_ASText(any_shape_edge.geo) | - | "108" | "408" | 0 | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | - | "201" | "302" | 0 | "POINT(-1 -1)" | + | ST_ASText(any_shape_edge.geo) | + | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | + | "POINT(-1 -1)" | When executing query: """ LOOKUP ON any_shape_edge WHERE ST_Intersects(any_shape_edge.geo, ST_Point(-1, -1)) YIELD ST_ASText(any_shape_edge.geo); """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | ST_ASText(any_shape_edge.geo) | - | "108" | "408" | 0 | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | - | "201" | "302" | 0 | "POINT(-1 -1)" | + | ST_ASText(any_shape_edge.geo) | + | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | + | "POINT(-1 -1)" | # Delete vertex with index When executing query: """ @@ -690,17 +690,17 @@ Feature: Geo base LOOKUP ON any_shape YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | - | "103" | "POLYGON((0 1, 1 2, 2 3, 0 1))" | - | "108" | "POINT(72.3 84.6)" | + | ST_ASText(any_shape.geo) | + | "LINESTRING(3 8, 4.7 73.23)" | + | "POLYGON((0 1, 1 2, 2 3, 0 1))" | + | "POINT(72.3 84.6)" | When executing query: """ LOOKUP ON any_shape WHERE ST_Covers(any_shape.geo, ST_Point(3, 8)) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | - | "102" | "LINESTRING(3 8, 4.7 73.23)" | + | ST_ASText(any_shape.geo) | + | "LINESTRING(3 8, 4.7 73.23)" | # Delete edge with index When executing query: """ @@ -718,14 +718,14 @@ Feature: Geo base LOOKUP ON any_shape_edge YIELD ST_ASText(any_shape_edge.geo); """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | ST_ASText(any_shape_edge.geo) | - | "108" | "408" | 0 | 
"POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | + | ST_ASText(any_shape_edge.geo) | + | "POLYGON((-20 -20, -20 20, 20 20, 20 -20, -20 -20), (1 1, 2 2, 0 2, 1 1))" | When executing query: """ LOOKUP ON any_shape WHERE ST_Intersects(ST_Point(-1, -1), any_shape.geo) YIELD ST_ASText(any_shape.geo); """ Then the result should be, in any order: - | VertexID | ST_ASText(any_shape.geo) | + | ST_ASText(any_shape.geo) | # Drop tag index When executing query: """ @@ -735,7 +735,7 @@ Feature: Geo base And wait 3 seconds When executing query: """ - LOOKUP ON any_shape; + LOOKUP ON any_shape YIELD id(vertex) as id; """ Then a ExecutionError should be raised at runtime: There is no index to use at runtime # Drop edge index @@ -747,7 +747,7 @@ Feature: Geo base And wait 3 seconds When executing query: """ - LOOKUP ON any_shape_edge; + LOOKUP ON any_shape_edge YIELD edge as e; """ Then a ExecutionError should be raised at runtime: There is no index to use at runtime # Drop tag diff --git a/tests/tck/features/index/Index.IntVid.feature b/tests/tck/features/index/Index.IntVid.feature index a5534596fcd..c28f438d2de 100644 --- a/tests/tck/features/index/Index.IntVid.feature +++ b/tests/tck/features/index/Index.IntVid.feature @@ -554,15 +554,15 @@ Feature: IndexTest_Vid_Int LOOKUP ON tag_1 WHERE tag_1.col5 == 5 YIELD tag_1.col5, tag_1.col1 """ Then the result should be, in any order: - | VertexID | tag_1.col5 | tag_1.col1 | - | 100 | 5 | true | + | tag_1.col5 | tag_1.col1 | + | 5 | true | When executing query: """ LOOKUP ON tag_1 WHERE tag_1.col5 == 5 YIELD tag_1.col1, tag_1.col5 """ Then the result should be, in any order: - | VertexID | tag_1.col1 | tag_1.col5 | - | 100 | true | 5 | + | tag_1.col1 | tag_1.col5 | + | true | 5 | Then drop the used space Scenario: IndexTest IntVid RebuildTagIndexStatusInfo diff --git a/tests/tck/features/index/Index.feature b/tests/tck/features/index/Index.feature index 12e7336994e..bef4eae033e 100644 --- 
a/tests/tck/features/index/Index.feature +++ b/tests/tck/features/index/Index.feature @@ -560,15 +560,15 @@ Feature: IndexTest_Vid_String LOOKUP ON tag_1 WHERE tag_1.col5 == 5 YIELD tag_1.col5, tag_1.col1 """ Then the result should be, in any order: - | VertexID | tag_1.col5 | tag_1.col1 | - | "100" | 5 | true | + | tag_1.col5 | tag_1.col1 | + | 5 | true | When executing query: """ LOOKUP ON tag_1 WHERE tag_1.col5 == 5 YIELD tag_1.col1, tag_1.col5 """ Then the result should be, in any order: - | VertexID | tag_1.col1 | tag_1.col5 | - | "100" | true | 5 | + | tag_1.col1 | tag_1.col5 | + | true | 5 | Then drop the used space Scenario: IndexTest RebuildTagIndexStatusInfo @@ -740,20 +740,20 @@ Feature: IndexTest_Vid_String | "rebuild_tag_space_all_tag_indexes" | "FINISHED" | When executing query: """ - LOOKUP ON id_tag WHERE id_tag.id == 100 + LOOKUP ON id_tag WHERE id_tag.id == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "100" | - | "200" | + | id | + | "100" | + | "200" | When executing query: """ - LOOKUP ON name_tag WHERE name_tag.name == "100" + LOOKUP ON name_tag WHERE name_tag.name == "100" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "300" | - | "400" | + | id | + | "300" | + | "400" | Then drop the used space Scenario: IndexTest rebuild all tag indexes by multi input @@ -795,26 +795,26 @@ Feature: IndexTest_Vid_String | "id_tag_index,name_tag_index" | "FINISHED" | When executing query: """ - LOOKUP ON id_tag WHERE id_tag.id == 100 + LOOKUP ON id_tag WHERE id_tag.id == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "100" | - | "200" | + | id | + | "100" | + | "200" | When executing query: """ - LOOKUP ON name_tag WHERE name_tag.name == "100" + LOOKUP ON name_tag WHERE name_tag.name == "100" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "300" | - | "400" | + | id | + | "300" | + | "400" | 
When executing query: """ - LOOKUP ON age_tag WHERE age_tag.age == 8 + LOOKUP ON age_tag WHERE age_tag.age == 8 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: IndexTest rebuild all edge indexes by empty input @@ -854,18 +854,18 @@ Feature: IndexTest_Vid_String | "rebuild_edge_space_all_edge_indexes" | "FINISHED" | When executing query: """ - LOOKUP ON id_edge WHERE id_edge.id == 100 + LOOKUP ON id_edge WHERE id_edge.id == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "100" | "200" | 0 | + | src | dst | rank | + | "100" | "200" | 0 | When executing query: """ - LOOKUP ON name_edge WHERE name_edge.name == "100" + LOOKUP ON name_edge WHERE name_edge.name == "100" YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "300" | "400" | 0 | + | src | dst | rank | + | "300" | "400" | 0 | Then drop the used space Scenario: IndexTest rebuild all edge indexes by multi input @@ -907,24 +907,24 @@ Feature: IndexTest_Vid_String | "id_edge_index,name_edge_index" | "FINISHED" | When executing query: """ - LOOKUP ON id_edge WHERE id_edge.id == 100 + LOOKUP ON id_edge WHERE id_edge.id == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "100" | "200" | 0 | + | src | dst | rank | + | "100" | "200" | 0 | When executing query: """ - LOOKUP ON name_edge WHERE name_edge.name == "100" + LOOKUP ON name_edge WHERE name_edge.name == "100" YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "300" | "400" | 0 | + | src | dst | rank | + | "300" | "400" | 0 | When executing query: """ - LOOKUP ON age_edge WHERE age_edge.age == 8 + LOOKUP ON age_edge 
WHERE age_edge.age == 8 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | Then drop the used space Scenario: show create tag index diff --git a/tests/tck/features/index/TagEdgeIndex.feature b/tests/tck/features/index/TagEdgeIndex.feature index 2412243eb56..bed05641186 100644 --- a/tests/tck/features/index/TagEdgeIndex.feature +++ b/tests/tck/features/index/TagEdgeIndex.feature @@ -105,23 +105,23 @@ Feature: tag and edge index tests from pytest LOOKUP ON tag_1 WHERE tag_1.col2 == 18 YIELD tag_1.col1 """ Then the result should be, in any order: - | VertexID | tag_1.col1 | - | '101' | 'Tom' | + | tag_1.col1 | + | 'Tom' | When executing query: """ LOOKUP ON tag_1 WHERE tag_1.col3 > 35.7 YIELD tag_1.col1 """ Then the result should be, in any order: - | VertexID | tag_1.col1 | - | '102' | 'Jerry' | - | '103' | 'Bob' | + | tag_1.col1 | + | 'Jerry' | + | 'Bob' | When executing query: """ LOOKUP ON tag_1 WHERE tag_1.col2 > 18 AND tag_1.col3 < 37.2 YIELD tag_1.col1 """ Then the result should be, in any order: - | VertexID | tag_1.col1 | - | '103' | 'Bob' | + | tag_1.col1 | + | 'Bob' | When executing query: """ DESC TAG INDEX single_tag_index @@ -343,23 +343,23 @@ Feature: tag and edge index tests from pytest LOOKUP ON edge_1 WHERE edge_1.col2 == 22 YIELD edge_1.col2 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2 | - | '102' | '103' | 0 | 22 | + | edge_1.col2 | + | 22 | When executing query: """ LOOKUP ON edge_1 WHERE edge_1.col3 > 43.4 YIELD edge_1.col1 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1 | - | '102' | '103' | 0 | 'Yellow' | - | '101' | '102' | 0 | 'Red' | + | edge_1.col1 | + | 'Yellow' | + | 'Red' | When executing query: """ LOOKUP ON edge_1 WHERE edge_1.col2 > 45 AND edge_1.col3 < 44.3 YIELD edge_1.col1 """ Then the result should be, in any order: - | SrcVID | DstVID | 
Ranking | edge_1.col1 | - | '103' | '101' | 0 | 'Blue' | + | edge_1.col1 | + | 'Blue' | # Describe Edge Index When executing query: """ diff --git a/tests/tck/features/insert/Insert.feature b/tests/tck/features/insert/Insert.feature index 884de390357..c30210c8d54 100644 --- a/tests/tck/features/insert/Insert.feature +++ b/tests/tck/features/insert/Insert.feature @@ -520,14 +520,14 @@ Feature: Insert string vid of vertex and edge LOOKUP on course YIELD course.name, course.introduce """ Then the result should be, in any order: - | VertexID | course.name | course.introduce | - | 'English' | 'Engli' | NULL | - | 'Math' | 'Math' | NULL | + | course.name | course.introduce | + | 'Engli' | NULL | + | 'Math' | NULL | When executing query: """ LOOKUP ON student YIELD student.name, student.age """ Then the result should be, in any order: - | VertexID | student.name | student.age | - | '' | 'Tom' | 12 | + | student.name | student.age | + | 'Tom' | 12 | Then drop the used space diff --git a/tests/tck/features/lookup/ByIndex.feature b/tests/tck/features/lookup/ByIndex.feature index f12621b80fd..b845ec3d249 100644 --- a/tests/tck/features/lookup/ByIndex.feature +++ b/tests/tck/features/lookup/ByIndex.feature @@ -7,10 +7,10 @@ Feature: Lookup by index itself Given a graph with space named "nba" When executing query: """ - LOOKUP ON team + LOOKUP ON team YIELD id(vertex) as teamID """ Then the result should be, in any order: - | VertexID | + | teamID | | 'Nets' | | 'Pistons' | | 'Bucks' | @@ -46,37 +46,37 @@ Feature: Lookup by index itself LOOKUP ON team YIELD team.name AS Name """ Then the result should be, in any order: - | VertexID | Name | - | 'Nets' | 'Nets' | - | 'Pistons' | 'Pistons' | - | 'Bucks' | 'Bucks' | - | 'Mavericks' | 'Mavericks' | - | 'Clippers' | 'Clippers' | - | 'Thunders' | 'Thunders' | - | 'Lakers' | 'Lakers' | - | 'Jazz' | 'Jazz' | - | 'Nuggets' | 'Nuggets' | - | 'Wizards' | 'Wizards' | - | 'Pacers' | 'Pacers' | - | 'Timberwolves' | 'Timberwolves' | - | 
'Hawks' | 'Hawks' | - | 'Warriors' | 'Warriors' | - | 'Magic' | 'Magic' | - | 'Rockets' | 'Rockets' | - | 'Pelicans' | 'Pelicans' | - | 'Raptors' | 'Raptors' | - | 'Spurs' | 'Spurs' | - | 'Heat' | 'Heat' | - | 'Grizzlies' | 'Grizzlies' | - | 'Knicks' | 'Knicks' | - | 'Suns' | 'Suns' | - | 'Hornets' | 'Hornets' | - | 'Cavaliers' | 'Cavaliers' | - | 'Kings' | 'Kings' | - | 'Celtics' | 'Celtics' | - | '76ers' | '76ers' | - | 'Trail Blazers' | 'Trail Blazers' | - | 'Bulls' | 'Bulls' | + | Name | + | 'Nets' | + | 'Pistons' | + | 'Bucks' | + | 'Mavericks' | + | 'Clippers' | + | 'Thunders' | + | 'Lakers' | + | 'Jazz' | + | 'Nuggets' | + | 'Wizards' | + | 'Pacers' | + | 'Timberwolves' | + | 'Hawks' | + | 'Warriors' | + | 'Magic' | + | 'Rockets' | + | 'Pelicans' | + | 'Raptors' | + | 'Spurs' | + | 'Heat' | + | 'Grizzlies' | + | 'Knicks' | + | 'Suns' | + | 'Hornets' | + | 'Cavaliers' | + | 'Kings' | + | 'Celtics' | + | '76ers' | + | 'Trail Blazers' | + | 'Bulls' | Scenario: [1] Tag TODO Given a graph with space named "nba" @@ -92,12 +92,12 @@ Feature: Lookup by index itself Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON player WHERE player.age > 9223372036854775807+1 + LOOKUP ON player WHERE player.age > 9223372036854775807+1 YIELD player.name """ Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ - LOOKUP ON player WHERE player.age > -9223372036854775808-1 + LOOKUP ON player WHERE player.age > -9223372036854775808-1 YIELD player.name """ Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer @@ -105,326 +105,326 @@ Feature: Lookup by index itself Given a graph with space named "nba" When executing query: """ - LOOKUP ON serve + LOOKUP ON serve YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | 
Ranking | - | "Amar'e Stoudemire" | 'Suns' | 0 | - | "Amar'e Stoudemire" | 'Knicks' | 0 | - | "Amar'e Stoudemire" | 'Heat' | 0 | - | 'Russell Westbrook' | 'Thunders' | 0 | - | 'James Harden' | 'Thunders' | 0 | - | 'James Harden' | 'Rockets' | 0 | - | 'Kobe Bryant' | 'Lakers' | 0 | - | 'Tracy McGrady' | 'Raptors' | 0 | - | 'Tracy McGrady' | 'Magic' | 0 | - | 'Tracy McGrady' | 'Rockets' | 0 | - | 'Tracy McGrady' | 'Spurs' | 0 | - | 'Chris Paul' | 'Hornets' | 0 | - | 'Chris Paul' | 'Clippers' | 0 | - | 'Chris Paul' | 'Rockets' | 0 | - | 'Boris Diaw' | 'Hawks' | 0 | - | 'Boris Diaw' | 'Suns' | 0 | - | 'Boris Diaw' | 'Hornets' | 0 | - | 'Boris Diaw' | 'Spurs' | 0 | - | 'Boris Diaw' | 'Jazz' | 0 | - | 'LeBron James' | 'Cavaliers' | 0 | - | 'LeBron James' | 'Heat' | 0 | - | 'LeBron James' | 'Cavaliers' | 1 | - | 'LeBron James' | 'Lakers' | 0 | - | 'Klay Thompson' | 'Warriors' | 0 | - | 'Kristaps Porzingis' | 'Knicks' | 0 | - | 'Kristaps Porzingis' | 'Mavericks' | 0 | - | 'Jonathon Simmons' | 'Spurs' | 0 | - | 'Jonathon Simmons' | 'Magic' | 0 | - | 'Jonathon Simmons' | '76ers' | 0 | - | 'Marco Belinelli' | 'Warriors' | 0 | - | 'Marco Belinelli' | 'Raptors' | 0 | - | 'Marco Belinelli' | 'Hornets' | 0 | - | 'Marco Belinelli' | 'Bulls' | 0 | - | 'Marco Belinelli' | 'Spurs' | 0 | - | 'Marco Belinelli' | 'Kings' | 0 | - | 'Marco Belinelli' | 'Hornets' | 1 | - | 'Marco Belinelli' | 'Hawks' | 0 | - | 'Marco Belinelli' | '76ers' | 0 | - | 'Marco Belinelli' | 'Spurs' | 1 | - | 'Luka Doncic' | 'Mavericks' | 0 | - | 'David West' | 'Hornets' | 0 | - | 'David West' | 'Pacers' | 0 | - | 'David West' | 'Spurs' | 0 | - | 'David West' | 'Warriors' | 0 | - | 'Tony Parker' | 'Spurs' | 0 | - | 'Tony Parker' | 'Hornets' | 0 | - | 'Danny Green' | 'Cavaliers' | 0 | - | 'Danny Green' | 'Spurs' | 0 | - | 'Danny Green' | 'Raptors' | 0 | - | 'Rudy Gay' | 'Grizzlies' | 0 | - | 'Rudy Gay' | 'Raptors' | 0 | - | 'Rudy Gay' | 'Kings' | 0 | - | 'Rudy Gay' | 'Spurs' | 0 | - | 'LaMarcus Aldridge' | 'Trail 
Blazers' | 0 | - | 'LaMarcus Aldridge' | 'Spurs' | 0 | - | 'Tim Duncan' | 'Spurs' | 0 | - | 'Kevin Durant' | 'Thunders' | 0 | - | 'Kevin Durant' | 'Warriors' | 0 | - | 'Stephen Curry' | 'Warriors' | 0 | - | 'Ray Allen' | 'Bucks' | 0 | - | 'Ray Allen' | 'Thunders' | 0 | - | 'Ray Allen' | 'Celtics' | 0 | - | 'Ray Allen' | 'Heat' | 0 | - | 'Tiago Splitter' | 'Spurs' | 0 | - | 'Tiago Splitter' | 'Hawks' | 0 | - | 'Tiago Splitter' | '76ers' | 0 | - | 'DeAndre Jordan' | 'Clippers' | 0 | - | 'DeAndre Jordan' | 'Mavericks' | 0 | - | 'DeAndre Jordan' | 'Knicks' | 0 | - | 'Paul Gasol' | 'Grizzlies' | 0 | - | 'Paul Gasol' | 'Lakers' | 0 | - | 'Paul Gasol' | 'Bulls' | 0 | - | 'Paul Gasol' | 'Spurs' | 0 | - | 'Paul Gasol' | 'Bucks' | 0 | - | 'Aron Baynes' | 'Spurs' | 0 | - | 'Aron Baynes' | 'Pistons' | 0 | - | 'Aron Baynes' | 'Celtics' | 0 | - | 'Cory Joseph' | 'Spurs' | 0 | - | 'Cory Joseph' | 'Raptors' | 0 | - | 'Cory Joseph' | 'Pacers' | 0 | - | 'Vince Carter' | 'Raptors' | 0 | - | 'Vince Carter' | 'Nets' | 0 | - | 'Vince Carter' | 'Magic' | 0 | - | 'Vince Carter' | 'Suns' | 0 | - | 'Vince Carter' | 'Mavericks' | 0 | - | 'Vince Carter' | 'Grizzlies' | 0 | - | 'Vince Carter' | 'Kings' | 0 | - | 'Vince Carter' | 'Hawks' | 0 | - | 'Marc Gasol' | 'Grizzlies' | 0 | - | 'Marc Gasol' | 'Raptors' | 0 | - | 'Ricky Rubio' | 'Timberwolves' | 0 | - | 'Ricky Rubio' | 'Jazz' | 0 | - | 'Ben Simmons' | '76ers' | 0 | - | 'Giannis Antetokounmpo' | 'Bucks' | 0 | - | 'Rajon Rondo' | 'Celtics' | 0 | - | 'Rajon Rondo' | 'Mavericks' | 0 | - | 'Rajon Rondo' | 'Kings' | 0 | - | 'Rajon Rondo' | 'Bulls' | 0 | - | 'Rajon Rondo' | 'Pelicans' | 0 | - | 'Rajon Rondo' | 'Lakers' | 0 | - | 'Manu Ginobili' | 'Spurs' | 0 | - | 'Kyrie Irving' | 'Cavaliers' | 0 | - | 'Kyrie Irving' | 'Celtics' | 0 | - | 'Carmelo Anthony' | 'Nuggets' | 0 | - | 'Carmelo Anthony' | 'Knicks' | 0 | - | 'Carmelo Anthony' | 'Thunders' | 0 | - | 'Carmelo Anthony' | 'Rockets' | 0 | - | 'Dwyane Wade' | 'Heat' | 0 | - | 'Dwyane Wade' | 
'Bulls' | 0 | - | 'Dwyane Wade' | 'Cavaliers' | 0 | - | 'Dwyane Wade' | 'Heat' | 1 | - | 'Joel Embiid' | '76ers' | 0 | - | 'Damian Lillard' | 'Trail Blazers' | 0 | - | 'Yao Ming' | 'Rockets' | 0 | - | 'Kyle Anderson' | 'Spurs' | 0 | - | 'Kyle Anderson' | 'Grizzlies' | 0 | - | 'Dejounte Murray' | 'Spurs' | 0 | - | 'Blake Griffin' | 'Clippers' | 0 | - | 'Blake Griffin' | 'Pistons' | 0 | - | 'Steve Nash' | 'Suns' | 0 | - | 'Steve Nash' | 'Mavericks' | 0 | - | 'Steve Nash' | 'Suns' | 1 | - | 'Steve Nash' | 'Lakers' | 0 | - | 'Jason Kidd' | 'Mavericks' | 0 | - | 'Jason Kidd' | 'Suns' | 0 | - | 'Jason Kidd' | 'Nets' | 0 | - | 'Jason Kidd' | 'Mavericks' | 1 | - | 'Jason Kidd' | 'Knicks' | 0 | - | 'Dirk Nowitzki' | 'Mavericks' | 0 | - | 'Paul George' | 'Pacers' | 0 | - | 'Paul George' | 'Thunders' | 0 | - | 'Grant Hill' | 'Pistons' | 0 | - | 'Grant Hill' | 'Magic' | 0 | - | 'Grant Hill' | 'Suns' | 0 | - | 'Grant Hill' | 'Clippers' | 0 | - | "Shaquille O'Neal" | 'Magic' | 0 | - | "Shaquille O'Neal" | 'Lakers' | 0 | - | "Shaquille O'Neal" | 'Heat' | 0 | - | "Shaquille O'Neal" | 'Suns' | 0 | - | "Shaquille O'Neal" | 'Cavaliers' | 0 | - | "Shaquille O'Neal" | 'Celtics' | 0 | - | 'JaVale McGee' | 'Wizards' | 0 | - | 'JaVale McGee' | 'Nuggets' | 0 | - | 'JaVale McGee' | 'Mavericks' | 0 | - | 'JaVale McGee' | 'Warriors' | 0 | - | 'JaVale McGee' | 'Lakers' | 0 | - | 'Dwight Howard' | 'Magic' | 0 | - | 'Dwight Howard' | 'Lakers' | 0 | - | 'Dwight Howard' | 'Rockets' | 0 | - | 'Dwight Howard' | 'Hawks' | 0 | - | 'Dwight Howard' | 'Hornets' | 0 | - | 'Dwight Howard' | 'Wizards' | 0 | + | src | dst | rank | + | "Amar'e Stoudemire" | 'Suns' | 0 | + | "Amar'e Stoudemire" | 'Knicks' | 0 | + | "Amar'e Stoudemire" | 'Heat' | 0 | + | 'Russell Westbrook' | 'Thunders' | 0 | + | 'James Harden' | 'Thunders' | 0 | + | 'James Harden' | 'Rockets' | 0 | + | 'Kobe Bryant' | 'Lakers' | 0 | + | 'Tracy McGrady' | 'Raptors' | 0 | + | 'Tracy McGrady' | 'Magic' | 0 | + | 'Tracy McGrady' | 'Rockets' | 0 | 
+ | 'Tracy McGrady' | 'Spurs' | 0 | + | 'Chris Paul' | 'Hornets' | 0 | + | 'Chris Paul' | 'Clippers' | 0 | + | 'Chris Paul' | 'Rockets' | 0 | + | 'Boris Diaw' | 'Hawks' | 0 | + | 'Boris Diaw' | 'Suns' | 0 | + | 'Boris Diaw' | 'Hornets' | 0 | + | 'Boris Diaw' | 'Spurs' | 0 | + | 'Boris Diaw' | 'Jazz' | 0 | + | 'LeBron James' | 'Cavaliers' | 0 | + | 'LeBron James' | 'Heat' | 0 | + | 'LeBron James' | 'Cavaliers' | 1 | + | 'LeBron James' | 'Lakers' | 0 | + | 'Klay Thompson' | 'Warriors' | 0 | + | 'Kristaps Porzingis' | 'Knicks' | 0 | + | 'Kristaps Porzingis' | 'Mavericks' | 0 | + | 'Jonathon Simmons' | 'Spurs' | 0 | + | 'Jonathon Simmons' | 'Magic' | 0 | + | 'Jonathon Simmons' | '76ers' | 0 | + | 'Marco Belinelli' | 'Warriors' | 0 | + | 'Marco Belinelli' | 'Raptors' | 0 | + | 'Marco Belinelli' | 'Hornets' | 0 | + | 'Marco Belinelli' | 'Bulls' | 0 | + | 'Marco Belinelli' | 'Spurs' | 0 | + | 'Marco Belinelli' | 'Kings' | 0 | + | 'Marco Belinelli' | 'Hornets' | 1 | + | 'Marco Belinelli' | 'Hawks' | 0 | + | 'Marco Belinelli' | '76ers' | 0 | + | 'Marco Belinelli' | 'Spurs' | 1 | + | 'Luka Doncic' | 'Mavericks' | 0 | + | 'David West' | 'Hornets' | 0 | + | 'David West' | 'Pacers' | 0 | + | 'David West' | 'Spurs' | 0 | + | 'David West' | 'Warriors' | 0 | + | 'Tony Parker' | 'Spurs' | 0 | + | 'Tony Parker' | 'Hornets' | 0 | + | 'Danny Green' | 'Cavaliers' | 0 | + | 'Danny Green' | 'Spurs' | 0 | + | 'Danny Green' | 'Raptors' | 0 | + | 'Rudy Gay' | 'Grizzlies' | 0 | + | 'Rudy Gay' | 'Raptors' | 0 | + | 'Rudy Gay' | 'Kings' | 0 | + | 'Rudy Gay' | 'Spurs' | 0 | + | 'LaMarcus Aldridge' | 'Trail Blazers' | 0 | + | 'LaMarcus Aldridge' | 'Spurs' | 0 | + | 'Tim Duncan' | 'Spurs' | 0 | + | 'Kevin Durant' | 'Thunders' | 0 | + | 'Kevin Durant' | 'Warriors' | 0 | + | 'Stephen Curry' | 'Warriors' | 0 | + | 'Ray Allen' | 'Bucks' | 0 | + | 'Ray Allen' | 'Thunders' | 0 | + | 'Ray Allen' | 'Celtics' | 0 | + | 'Ray Allen' | 'Heat' | 0 | + | 'Tiago Splitter' | 'Spurs' | 0 | + | 'Tiago Splitter' | 
'Hawks' | 0 | + | 'Tiago Splitter' | '76ers' | 0 | + | 'DeAndre Jordan' | 'Clippers' | 0 | + | 'DeAndre Jordan' | 'Mavericks' | 0 | + | 'DeAndre Jordan' | 'Knicks' | 0 | + | 'Paul Gasol' | 'Grizzlies' | 0 | + | 'Paul Gasol' | 'Lakers' | 0 | + | 'Paul Gasol' | 'Bulls' | 0 | + | 'Paul Gasol' | 'Spurs' | 0 | + | 'Paul Gasol' | 'Bucks' | 0 | + | 'Aron Baynes' | 'Spurs' | 0 | + | 'Aron Baynes' | 'Pistons' | 0 | + | 'Aron Baynes' | 'Celtics' | 0 | + | 'Cory Joseph' | 'Spurs' | 0 | + | 'Cory Joseph' | 'Raptors' | 0 | + | 'Cory Joseph' | 'Pacers' | 0 | + | 'Vince Carter' | 'Raptors' | 0 | + | 'Vince Carter' | 'Nets' | 0 | + | 'Vince Carter' | 'Magic' | 0 | + | 'Vince Carter' | 'Suns' | 0 | + | 'Vince Carter' | 'Mavericks' | 0 | + | 'Vince Carter' | 'Grizzlies' | 0 | + | 'Vince Carter' | 'Kings' | 0 | + | 'Vince Carter' | 'Hawks' | 0 | + | 'Marc Gasol' | 'Grizzlies' | 0 | + | 'Marc Gasol' | 'Raptors' | 0 | + | 'Ricky Rubio' | 'Timberwolves' | 0 | + | 'Ricky Rubio' | 'Jazz' | 0 | + | 'Ben Simmons' | '76ers' | 0 | + | 'Giannis Antetokounmpo' | 'Bucks' | 0 | + | 'Rajon Rondo' | 'Celtics' | 0 | + | 'Rajon Rondo' | 'Mavericks' | 0 | + | 'Rajon Rondo' | 'Kings' | 0 | + | 'Rajon Rondo' | 'Bulls' | 0 | + | 'Rajon Rondo' | 'Pelicans' | 0 | + | 'Rajon Rondo' | 'Lakers' | 0 | + | 'Manu Ginobili' | 'Spurs' | 0 | + | 'Kyrie Irving' | 'Cavaliers' | 0 | + | 'Kyrie Irving' | 'Celtics' | 0 | + | 'Carmelo Anthony' | 'Nuggets' | 0 | + | 'Carmelo Anthony' | 'Knicks' | 0 | + | 'Carmelo Anthony' | 'Thunders' | 0 | + | 'Carmelo Anthony' | 'Rockets' | 0 | + | 'Dwyane Wade' | 'Heat' | 0 | + | 'Dwyane Wade' | 'Bulls' | 0 | + | 'Dwyane Wade' | 'Cavaliers' | 0 | + | 'Dwyane Wade' | 'Heat' | 1 | + | 'Joel Embiid' | '76ers' | 0 | + | 'Damian Lillard' | 'Trail Blazers' | 0 | + | 'Yao Ming' | 'Rockets' | 0 | + | 'Kyle Anderson' | 'Spurs' | 0 | + | 'Kyle Anderson' | 'Grizzlies' | 0 | + | 'Dejounte Murray' | 'Spurs' | 0 | + | 'Blake Griffin' | 'Clippers' | 0 | + | 'Blake Griffin' | 'Pistons' | 0 | + | 
'Steve Nash' | 'Suns' | 0 | + | 'Steve Nash' | 'Mavericks' | 0 | + | 'Steve Nash' | 'Suns' | 1 | + | 'Steve Nash' | 'Lakers' | 0 | + | 'Jason Kidd' | 'Mavericks' | 0 | + | 'Jason Kidd' | 'Suns' | 0 | + | 'Jason Kidd' | 'Nets' | 0 | + | 'Jason Kidd' | 'Mavericks' | 1 | + | 'Jason Kidd' | 'Knicks' | 0 | + | 'Dirk Nowitzki' | 'Mavericks' | 0 | + | 'Paul George' | 'Pacers' | 0 | + | 'Paul George' | 'Thunders' | 0 | + | 'Grant Hill' | 'Pistons' | 0 | + | 'Grant Hill' | 'Magic' | 0 | + | 'Grant Hill' | 'Suns' | 0 | + | 'Grant Hill' | 'Clippers' | 0 | + | "Shaquille O'Neal" | 'Magic' | 0 | + | "Shaquille O'Neal" | 'Lakers' | 0 | + | "Shaquille O'Neal" | 'Heat' | 0 | + | "Shaquille O'Neal" | 'Suns' | 0 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | + | "Shaquille O'Neal" | 'Celtics' | 0 | + | 'JaVale McGee' | 'Wizards' | 0 | + | 'JaVale McGee' | 'Nuggets' | 0 | + | 'JaVale McGee' | 'Mavericks' | 0 | + | 'JaVale McGee' | 'Warriors' | 0 | + | 'JaVale McGee' | 'Lakers' | 0 | + | 'Dwight Howard' | 'Magic' | 0 | + | 'Dwight Howard' | 'Lakers' | 0 | + | 'Dwight Howard' | 'Rockets' | 0 | + | 'Dwight Howard' | 'Hawks' | 0 | + | 'Dwight Howard' | 'Hornets' | 0 | + | 'Dwight Howard' | 'Wizards' | 0 | When executing query: """ LOOKUP ON serve YIELD serve.start_year AS startYear """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | startYear | - | "Amar'e Stoudemire" | 'Suns' | 0 | 2002 | - | "Amar'e Stoudemire" | 'Knicks' | 0 | 2010 | - | "Amar'e Stoudemire" | 'Heat' | 0 | 2015 | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'James Harden' | 'Thunders' | 0 | 2009 | - | 'James Harden' | 'Rockets' | 0 | 2012 | - | 'Kobe Bryant' | 'Lakers' | 0 | 1996 | - | 'Tracy McGrady' | 'Raptors' | 0 | 1997 | - | 'Tracy McGrady' | 'Magic' | 0 | 2000 | - | 'Tracy McGrady' | 'Rockets' | 0 | 2004 | - | 'Tracy McGrady' | 'Spurs' | 0 | 2013 | - | 'Chris Paul' | 'Hornets' | 0 | 2005 | - | 'Chris Paul' | 'Clippers' | 0 | 2011 | - | 'Chris Paul' | 'Rockets' | 0 | 2017 | - | 
'Boris Diaw' | 'Hawks' | 0 | 2003 | - | 'Boris Diaw' | 'Suns' | 0 | 2005 | - | 'Boris Diaw' | 'Hornets' | 0 | 2008 | - | 'Boris Diaw' | 'Spurs' | 0 | 2012 | - | 'Boris Diaw' | 'Jazz' | 0 | 2016 | - | 'LeBron James' | 'Cavaliers' | 0 | 2003 | - | 'LeBron James' | 'Heat' | 0 | 2010 | - | 'LeBron James' | 'Cavaliers' | 1 | 2014 | - | 'LeBron James' | 'Lakers' | 0 | 2018 | - | 'Klay Thompson' | 'Warriors' | 0 | 2011 | - | 'Kristaps Porzingis' | 'Knicks' | 0 | 2015 | - | 'Kristaps Porzingis' | 'Mavericks' | 0 | 2019 | - | 'Jonathon Simmons' | 'Spurs' | 0 | 2015 | - | 'Jonathon Simmons' | 'Magic' | 0 | 2017 | - | 'Jonathon Simmons' | '76ers' | 0 | 2019 | - | 'Marco Belinelli' | 'Warriors' | 0 | 2007 | - | 'Marco Belinelli' | 'Raptors' | 0 | 2009 | - | 'Marco Belinelli' | 'Hornets' | 0 | 2010 | - | 'Marco Belinelli' | 'Bulls' | 0 | 2012 | - | 'Marco Belinelli' | 'Spurs' | 0 | 2013 | - | 'Marco Belinelli' | 'Kings' | 0 | 2015 | - | 'Marco Belinelli' | 'Hornets' | 1 | 2016 | - | 'Marco Belinelli' | 'Hawks' | 0 | 2017 | - | 'Marco Belinelli' | '76ers' | 0 | 2018 | - | 'Marco Belinelli' | 'Spurs' | 1 | 2018 | - | 'Luka Doncic' | 'Mavericks' | 0 | 2018 | - | 'David West' | 'Hornets' | 0 | 2003 | - | 'David West' | 'Pacers' | 0 | 2011 | - | 'David West' | 'Spurs' | 0 | 2015 | - | 'David West' | 'Warriors' | 0 | 2016 | - | 'Tony Parker' | 'Spurs' | 0 | 1999 | - | 'Tony Parker' | 'Hornets' | 0 | 2018 | - | 'Danny Green' | 'Cavaliers' | 0 | 2009 | - | 'Danny Green' | 'Spurs' | 0 | 2010 | - | 'Danny Green' | 'Raptors' | 0 | 2018 | - | 'Rudy Gay' | 'Grizzlies' | 0 | 2006 | - | 'Rudy Gay' | 'Raptors' | 0 | 2013 | - | 'Rudy Gay' | 'Kings' | 0 | 2013 | - | 'Rudy Gay' | 'Spurs' | 0 | 2017 | - | 'LaMarcus Aldridge' | 'Trail Blazers' | 0 | 2006 | - | 'LaMarcus Aldridge' | 'Spurs' | 0 | 2015 | - | 'Tim Duncan' | 'Spurs' | 0 | 1997 | - | 'Kevin Durant' | 'Thunders' | 0 | 2007 | - | 'Kevin Durant' | 'Warriors' | 0 | 2016 | - | 'Stephen Curry' | 'Warriors' | 0 | 2009 | - | 'Ray Allen' | 
'Bucks' | 0 | 1996 | - | 'Ray Allen' | 'Thunders' | 0 | 2003 | - | 'Ray Allen' | 'Celtics' | 0 | 2007 | - | 'Ray Allen' | 'Heat' | 0 | 2012 | - | 'Tiago Splitter' | 'Spurs' | 0 | 2010 | - | 'Tiago Splitter' | 'Hawks' | 0 | 2015 | - | 'Tiago Splitter' | '76ers' | 0 | 2017 | - | 'DeAndre Jordan' | 'Clippers' | 0 | 2008 | - | 'DeAndre Jordan' | 'Mavericks' | 0 | 2018 | - | 'DeAndre Jordan' | 'Knicks' | 0 | 2019 | - | 'Paul Gasol' | 'Grizzlies' | 0 | 2001 | - | 'Paul Gasol' | 'Lakers' | 0 | 2008 | - | 'Paul Gasol' | 'Bulls' | 0 | 2014 | - | 'Paul Gasol' | 'Spurs' | 0 | 2016 | - | 'Paul Gasol' | 'Bucks' | 0 | 2019 | - | 'Aron Baynes' | 'Spurs' | 0 | 2013 | - | 'Aron Baynes' | 'Pistons' | 0 | 2015 | - | 'Aron Baynes' | 'Celtics' | 0 | 2017 | - | 'Cory Joseph' | 'Spurs' | 0 | 2011 | - | 'Cory Joseph' | 'Raptors' | 0 | 2015 | - | 'Cory Joseph' | 'Pacers' | 0 | 2017 | - | 'Vince Carter' | 'Raptors' | 0 | 1998 | - | 'Vince Carter' | 'Nets' | 0 | 2004 | - | 'Vince Carter' | 'Magic' | 0 | 2009 | - | 'Vince Carter' | 'Suns' | 0 | 2010 | - | 'Vince Carter' | 'Mavericks' | 0 | 2011 | - | 'Vince Carter' | 'Grizzlies' | 0 | 2014 | - | 'Vince Carter' | 'Kings' | 0 | 2017 | - | 'Vince Carter' | 'Hawks' | 0 | 2018 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | - | 'Marc Gasol' | 'Raptors' | 0 | 2019 | - | 'Ricky Rubio' | 'Timberwolves' | 0 | 2011 | - | 'Ricky Rubio' | 'Jazz' | 0 | 2017 | - | 'Ben Simmons' | '76ers' | 0 | 2016 | - | 'Giannis Antetokounmpo' | 'Bucks' | 0 | 2013 | - | 'Rajon Rondo' | 'Celtics' | 0 | 2006 | - | 'Rajon Rondo' | 'Mavericks' | 0 | 2014 | - | 'Rajon Rondo' | 'Kings' | 0 | 2015 | - | 'Rajon Rondo' | 'Bulls' | 0 | 2016 | - | 'Rajon Rondo' | 'Pelicans' | 0 | 2017 | - | 'Rajon Rondo' | 'Lakers' | 0 | 2018 | - | 'Manu Ginobili' | 'Spurs' | 0 | 2002 | - | 'Kyrie Irving' | 'Cavaliers' | 0 | 2011 | - | 'Kyrie Irving' | 'Celtics' | 0 | 2017 | - | 'Carmelo Anthony' | 'Nuggets' | 0 | 2003 | - | 'Carmelo Anthony' | 'Knicks' | 0 | 2011 | - | 'Carmelo Anthony' | 'Thunders' 
| 0 | 2017 | - | 'Carmelo Anthony' | 'Rockets' | 0 | 2018 | - | 'Dwyane Wade' | 'Heat' | 0 | 2003 | - | 'Dwyane Wade' | 'Bulls' | 0 | 2016 | - | 'Dwyane Wade' | 'Cavaliers' | 0 | 2017 | - | 'Dwyane Wade' | 'Heat' | 1 | 2018 | - | 'Joel Embiid' | '76ers' | 0 | 2014 | - | 'Damian Lillard' | 'Trail Blazers' | 0 | 2012 | - | 'Yao Ming' | 'Rockets' | 0 | 2002 | - | 'Kyle Anderson' | 'Spurs' | 0 | 2014 | - | 'Kyle Anderson' | 'Grizzlies' | 0 | 2018 | - | 'Dejounte Murray' | 'Spurs' | 0 | 2016 | - | 'Blake Griffin' | 'Clippers' | 0 | 2009 | - | 'Blake Griffin' | 'Pistons' | 0 | 2018 | - | 'Steve Nash' | 'Suns' | 0 | 1996 | - | 'Steve Nash' | 'Mavericks' | 0 | 1998 | - | 'Steve Nash' | 'Suns' | 1 | 2004 | - | 'Steve Nash' | 'Lakers' | 0 | 2012 | - | 'Jason Kidd' | 'Mavericks' | 0 | 1994 | - | 'Jason Kidd' | 'Suns' | 0 | 1996 | - | 'Jason Kidd' | 'Nets' | 0 | 2001 | - | 'Jason Kidd' | 'Mavericks' | 1 | 2008 | - | 'Jason Kidd' | 'Knicks' | 0 | 2012 | - | 'Dirk Nowitzki' | 'Mavericks' | 0 | 1998 | - | 'Paul George' | 'Pacers' | 0 | 2010 | - | 'Paul George' | 'Thunders' | 0 | 2017 | - | 'Grant Hill' | 'Pistons' | 0 | 1994 | - | 'Grant Hill' | 'Magic' | 0 | 2000 | - | 'Grant Hill' | 'Suns' | 0 | 2007 | - | 'Grant Hill' | 'Clippers' | 0 | 2012 | - | "Shaquille O'Neal" | 'Magic' | 0 | 1992 | - | "Shaquille O'Neal" | 'Lakers' | 0 | 1996 | - | "Shaquille O'Neal" | 'Heat' | 0 | 2004 | - | "Shaquille O'Neal" | 'Suns' | 0 | 2008 | - | "Shaquille O'Neal" | 'Cavaliers' | 0 | 2009 | - | "Shaquille O'Neal" | 'Celtics' | 0 | 2010 | - | 'JaVale McGee' | 'Wizards' | 0 | 2008 | - | 'JaVale McGee' | 'Nuggets' | 0 | 2012 | - | 'JaVale McGee' | 'Mavericks' | 0 | 2015 | - | 'JaVale McGee' | 'Warriors' | 0 | 2016 | - | 'JaVale McGee' | 'Lakers' | 0 | 2018 | - | 'Dwight Howard' | 'Magic' | 0 | 2004 | - | 'Dwight Howard' | 'Lakers' | 0 | 2012 | - | 'Dwight Howard' | 'Rockets' | 0 | 2013 | - | 'Dwight Howard' | 'Hawks' | 0 | 2016 | - | 'Dwight Howard' | 'Hornets' | 0 | 2017 | - | 'Dwight Howard' | 
'Wizards' | 0 | 2018 | + | startYear | + | 2002 | + | 2010 | + | 2015 | + | 2008 | + | 2009 | + | 2012 | + | 1996 | + | 1997 | + | 2000 | + | 2004 | + | 2013 | + | 2005 | + | 2011 | + | 2017 | + | 2003 | + | 2005 | + | 2008 | + | 2012 | + | 2016 | + | 2003 | + | 2010 | + | 2014 | + | 2018 | + | 2011 | + | 2015 | + | 2019 | + | 2015 | + | 2017 | + | 2019 | + | 2007 | + | 2009 | + | 2010 | + | 2012 | + | 2013 | + | 2015 | + | 2016 | + | 2017 | + | 2018 | + | 2018 | + | 2018 | + | 2003 | + | 2011 | + | 2015 | + | 2016 | + | 1999 | + | 2018 | + | 2009 | + | 2010 | + | 2018 | + | 2006 | + | 2013 | + | 2013 | + | 2017 | + | 2006 | + | 2015 | + | 1997 | + | 2007 | + | 2016 | + | 2009 | + | 1996 | + | 2003 | + | 2007 | + | 2012 | + | 2010 | + | 2015 | + | 2017 | + | 2008 | + | 2018 | + | 2019 | + | 2001 | + | 2008 | + | 2014 | + | 2016 | + | 2019 | + | 2013 | + | 2015 | + | 2017 | + | 2011 | + | 2015 | + | 2017 | + | 1998 | + | 2004 | + | 2009 | + | 2010 | + | 2011 | + | 2014 | + | 2017 | + | 2018 | + | 2008 | + | 2019 | + | 2011 | + | 2017 | + | 2016 | + | 2013 | + | 2006 | + | 2014 | + | 2015 | + | 2016 | + | 2017 | + | 2018 | + | 2002 | + | 2011 | + | 2017 | + | 2003 | + | 2011 | + | 2017 | + | 2018 | + | 2003 | + | 2016 | + | 2017 | + | 2018 | + | 2014 | + | 2012 | + | 2002 | + | 2014 | + | 2018 | + | 2016 | + | 2009 | + | 2018 | + | 1996 | + | 1998 | + | 2004 | + | 2012 | + | 1994 | + | 1996 | + | 2001 | + | 2008 | + | 2012 | + | 1998 | + | 2010 | + | 2017 | + | 1994 | + | 2000 | + | 2007 | + | 2012 | + | 1992 | + | 1996 | + | 2004 | + | 2008 | + | 2009 | + | 2010 | + | 2008 | + | 2012 | + | 2015 | + | 2016 | + | 2018 | + | 2004 | + | 2012 | + | 2013 | + | 2016 | + | 2017 | + | 2018 | Scenario: [2] Edge TODO Given a graph with space named "nba" When executing query: """ - LOOKUP ON serve WHERE 1 + 1 == 2 + LOOKUP ON serve WHERE 1 + 1 == 2 YIELD serve.start_year """ Then a SemanticError should be raised at runtime: When executing query: @@ -434,7 +434,7 @@ Feature: 
Lookup by index itself Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON serve WHERE serve.start_year == serve.end_year + LOOKUP ON serve WHERE serve.start_year == serve.end_year YIELD edge as e """ Then a SemanticError should be raised at runtime: When executing query: @@ -447,18 +447,18 @@ Feature: Lookup by index itself Given a graph with space named "nba" When executing query: """ - LOOKUP ON player WHERE player.age == 40 YIELD player.age AS Age + LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | + | name | Age | | "Dirk Nowitzki" | 40 | | "Kobe Bryant" | 40 | When executing query: """ - LOOKUP ON player WHERE player.age > 40 YIELD player.age AS Age + LOOKUP ON player WHERE player.age > 40 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -469,10 +469,10 @@ Feature: Lookup by index itself | "Vince Carter" | 42 | When executing query: """ - LOOKUP ON player WHERE player.age >= 40.0 YIELD player.age AS Age + LOOKUP ON player WHERE player.age >= 40.0 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -485,10 +485,10 @@ Feature: Lookup by index itself | "Kobe Bryant" | 40 | When executing query: """ - LOOKUP ON player WHERE player.age > 40.5 YIELD player.age AS Age + LOOKUP ON player WHERE player.age > 40.5 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -499,10 +499,10 @@ Feature: Lookup by index itself | "Vince Carter" | 42 | When executing query: """ - LOOKUP ON player WHERE player.age 
>= 40.5 YIELD player.age AS Age + LOOKUP ON player WHERE player.age >= 40.5 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -517,34 +517,34 @@ Feature: Lookup by index itself YIELD player.age AS Age, player.name AS Name | order by $-.Age DESC, $-.Name| limit 10 """ Then the result should be, in order, with relax comparison: - | VertexID | Age | Name | - | "Tracy McGrady" | 39 | "Tracy McGrady" | - | "David West" | 38 | "David West" | - | "Paul Gasol" | 38 | "Paul Gasol" | - | "Yao Ming" | 38 | "Yao Ming" | - | "Dwyane Wade" | 37 | "Dwyane Wade" | - | "Amar'e Stoudemire" | 36 | "Amar'e Stoudemire" | - | "Boris Diaw" | 36 | "Boris Diaw" | - | "Tony Parker" | 36 | "Tony Parker" | - | "Carmelo Anthony" | 34 | "Carmelo Anthony" | - | "LeBron James" | 34 | "LeBron James" | + | Age | Name | + | 39 | "Tracy McGrady" | + | 38 | "David West" | + | 38 | "Paul Gasol" | + | 38 | "Yao Ming" | + | 37 | "Dwyane Wade" | + | 36 | "Amar'e Stoudemire" | + | 36 | "Boris Diaw" | + | 36 | "Tony Parker" | + | 34 | "Carmelo Anthony" | + | 34 | "LeBron James" | When executing query: """ LOOKUP ON player WHERE player.age <= 40 YIELD player.age AS Age, player.name AS Name | order by $-.Age DESC, $-.Name| limit 10 """ Then the result should be, in order, with relax comparison: - | VertexID | Age | Name | - | "Dirk Nowitzki" | 40 | "Dirk Nowitzki" | - | "Kobe Bryant" | 40 | "Kobe Bryant" | - | "Tracy McGrady" | 39 | "Tracy McGrady" | - | "David West" | 38 | "David West" | - | "Paul Gasol" | 38 | "Paul Gasol" | - | "Yao Ming" | 38 | "Yao Ming" | - | "Dwyane Wade" | 37 | "Dwyane Wade" | - | "Amar'e Stoudemire" | 36 | "Amar'e Stoudemire" | - | "Boris Diaw" | 36 | "Boris Diaw" | - | "Tony Parker" | 36 | "Tony Parker" | + | Age | Name | + | 40 | "Dirk Nowitzki" | + | 40 | "Kobe Bryant" | + | 39 | "Tracy McGrady" | + | 38 | "David West" | + | 38 | 
"Paul Gasol" | + | 38 | "Yao Ming" | + | 37 | "Dwyane Wade" | + | 36 | "Amar'e Stoudemire" | + | 36 | "Boris Diaw" | + | 36 | "Tony Parker" | Scenario: [2] Compare INT and FLOAT during IndexScan Given an empty graph @@ -563,26 +563,26 @@ Feature: Lookup by index itself Then the execution should be successful When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT > 70; + LOOKUP ON weight WHERE weight.WEIGHT > 70 YIELD id(vertex) as name; """ Then the result should be, in any order: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT > 70.4; + LOOKUP ON weight WHERE weight.WEIGHT > 70.4 YIELD id(vertex) as name; """ Then the result should be, in any order: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT >= 70.5; + LOOKUP ON weight WHERE weight.WEIGHT >= 70.5 YIELD id(vertex) as name; """ Then the result should be, in any order: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | Then drop the used space diff --git a/tests/tck/features/lookup/ByIndex.intVid.feature b/tests/tck/features/lookup/ByIndex.intVid.feature index 5dd9e4aaa5c..32e74287447 100644 --- a/tests/tck/features/lookup/ByIndex.intVid.feature +++ b/tests/tck/features/lookup/ByIndex.intVid.feature @@ -7,10 +7,10 @@ Feature: Lookup by index itself in integer vid Given a graph with space named "nba_int_vid" When executing query: """ - LOOKUP ON team + LOOKUP ON team YIELD id(vertex) as name """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | name | | 'Nets' | | 'Pistons' | | 'Bucks' | @@ -43,10 +43,10 @@ Feature: Lookup by index itself in integer vid | 'Bulls' | When executing query: """ - LOOKUP ON team YIELD team.name AS Name + LOOKUP ON team YIELD id(vertex) as id, team.name AS Name """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Name | + | 
id | Name | | 'Nets' | 'Nets' | | 'Pistons' | 'Pistons' | | 'Bucks' | 'Bucks' | @@ -82,7 +82,7 @@ Feature: Lookup by index itself in integer vid Given a graph with space named "nba_int_vid" When executing query: """ - LOOKUP ON team WHERE 1 + 1 == 2 + LOOKUP ON team WHERE 1 + 1 == 2 YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: @@ -92,12 +92,12 @@ Feature: Lookup by index itself in integer vid Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON player WHERE player.age > 9223372036854775807+1 + LOOKUP ON player WHERE player.age > 9223372036854775807+1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: result of (9223372036854775807+1) cannot be represented as an integer When executing query: """ - LOOKUP ON player WHERE player.age > -9223372036854775808-1 + LOOKUP ON player WHERE player.age > -9223372036854775808-1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: result of (-9223372036854775808-1) cannot be represented as an integer @@ -105,326 +105,326 @@ Feature: Lookup by index itself in integer vid Given a graph with space named "nba_int_vid" When executing query: """ - LOOKUP ON serve + LOOKUP ON serve YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order, and the columns 0,1 should be hashed: - | SrcVID | DstVID | Ranking | - | "Amar'e Stoudemire" | 'Suns' | 0 | - | "Amar'e Stoudemire" | 'Knicks' | 0 | - | "Amar'e Stoudemire" | 'Heat' | 0 | - | 'Russell Westbrook' | 'Thunders' | 0 | - | 'James Harden' | 'Thunders' | 0 | - | 'James Harden' | 'Rockets' | 0 | - | 'Kobe Bryant' | 'Lakers' | 0 | - | 'Tracy McGrady' | 'Raptors' | 0 | - | 'Tracy McGrady' | 'Magic' | 0 | - | 'Tracy McGrady' | 'Rockets' | 0 | - | 'Tracy McGrady' | 'Spurs' | 0 | - | 'Chris Paul' | 'Hornets' | 0 | - | 'Chris Paul' | 'Clippers' | 0 | - | 'Chris Paul' | 'Rockets' | 0 | - | 'Boris Diaw' | 'Hawks' | 0 | - | 
'Boris Diaw' | 'Suns' | 0 | - | 'Boris Diaw' | 'Hornets' | 0 | - | 'Boris Diaw' | 'Spurs' | 0 | - | 'Boris Diaw' | 'Jazz' | 0 | - | 'LeBron James' | 'Cavaliers' | 0 | - | 'LeBron James' | 'Heat' | 0 | - | 'LeBron James' | 'Cavaliers' | 1 | - | 'LeBron James' | 'Lakers' | 0 | - | 'Klay Thompson' | 'Warriors' | 0 | - | 'Kristaps Porzingis' | 'Knicks' | 0 | - | 'Kristaps Porzingis' | 'Mavericks' | 0 | - | 'Jonathon Simmons' | 'Spurs' | 0 | - | 'Jonathon Simmons' | 'Magic' | 0 | - | 'Jonathon Simmons' | '76ers' | 0 | - | 'Marco Belinelli' | 'Warriors' | 0 | - | 'Marco Belinelli' | 'Raptors' | 0 | - | 'Marco Belinelli' | 'Hornets' | 0 | - | 'Marco Belinelli' | 'Bulls' | 0 | - | 'Marco Belinelli' | 'Spurs' | 0 | - | 'Marco Belinelli' | 'Kings' | 0 | - | 'Marco Belinelli' | 'Hornets' | 1 | - | 'Marco Belinelli' | 'Hawks' | 0 | - | 'Marco Belinelli' | '76ers' | 0 | - | 'Marco Belinelli' | 'Spurs' | 1 | - | 'Luka Doncic' | 'Mavericks' | 0 | - | 'David West' | 'Hornets' | 0 | - | 'David West' | 'Pacers' | 0 | - | 'David West' | 'Spurs' | 0 | - | 'David West' | 'Warriors' | 0 | - | 'Tony Parker' | 'Spurs' | 0 | - | 'Tony Parker' | 'Hornets' | 0 | - | 'Danny Green' | 'Cavaliers' | 0 | - | 'Danny Green' | 'Spurs' | 0 | - | 'Danny Green' | 'Raptors' | 0 | - | 'Rudy Gay' | 'Grizzlies' | 0 | - | 'Rudy Gay' | 'Raptors' | 0 | - | 'Rudy Gay' | 'Kings' | 0 | - | 'Rudy Gay' | 'Spurs' | 0 | - | 'LaMarcus Aldridge' | 'Trail Blazers' | 0 | - | 'LaMarcus Aldridge' | 'Spurs' | 0 | - | 'Tim Duncan' | 'Spurs' | 0 | - | 'Kevin Durant' | 'Thunders' | 0 | - | 'Kevin Durant' | 'Warriors' | 0 | - | 'Stephen Curry' | 'Warriors' | 0 | - | 'Ray Allen' | 'Bucks' | 0 | - | 'Ray Allen' | 'Thunders' | 0 | - | 'Ray Allen' | 'Celtics' | 0 | - | 'Ray Allen' | 'Heat' | 0 | - | 'Tiago Splitter' | 'Spurs' | 0 | - | 'Tiago Splitter' | 'Hawks' | 0 | - | 'Tiago Splitter' | '76ers' | 0 | - | 'DeAndre Jordan' | 'Clippers' | 0 | - | 'DeAndre Jordan' | 'Mavericks' | 0 | - | 'DeAndre Jordan' | 'Knicks' | 0 | - | 'Paul 
Gasol' | 'Grizzlies' | 0 | - | 'Paul Gasol' | 'Lakers' | 0 | - | 'Paul Gasol' | 'Bulls' | 0 | - | 'Paul Gasol' | 'Spurs' | 0 | - | 'Paul Gasol' | 'Bucks' | 0 | - | 'Aron Baynes' | 'Spurs' | 0 | - | 'Aron Baynes' | 'Pistons' | 0 | - | 'Aron Baynes' | 'Celtics' | 0 | - | 'Cory Joseph' | 'Spurs' | 0 | - | 'Cory Joseph' | 'Raptors' | 0 | - | 'Cory Joseph' | 'Pacers' | 0 | - | 'Vince Carter' | 'Raptors' | 0 | - | 'Vince Carter' | 'Nets' | 0 | - | 'Vince Carter' | 'Magic' | 0 | - | 'Vince Carter' | 'Suns' | 0 | - | 'Vince Carter' | 'Mavericks' | 0 | - | 'Vince Carter' | 'Grizzlies' | 0 | - | 'Vince Carter' | 'Kings' | 0 | - | 'Vince Carter' | 'Hawks' | 0 | - | 'Marc Gasol' | 'Grizzlies' | 0 | - | 'Marc Gasol' | 'Raptors' | 0 | - | 'Ricky Rubio' | 'Timberwolves' | 0 | - | 'Ricky Rubio' | 'Jazz' | 0 | - | 'Ben Simmons' | '76ers' | 0 | - | 'Giannis Antetokounmpo' | 'Bucks' | 0 | - | 'Rajon Rondo' | 'Celtics' | 0 | - | 'Rajon Rondo' | 'Mavericks' | 0 | - | 'Rajon Rondo' | 'Kings' | 0 | - | 'Rajon Rondo' | 'Bulls' | 0 | - | 'Rajon Rondo' | 'Pelicans' | 0 | - | 'Rajon Rondo' | 'Lakers' | 0 | - | 'Manu Ginobili' | 'Spurs' | 0 | - | 'Kyrie Irving' | 'Cavaliers' | 0 | - | 'Kyrie Irving' | 'Celtics' | 0 | - | 'Carmelo Anthony' | 'Nuggets' | 0 | - | 'Carmelo Anthony' | 'Knicks' | 0 | - | 'Carmelo Anthony' | 'Thunders' | 0 | - | 'Carmelo Anthony' | 'Rockets' | 0 | - | 'Dwyane Wade' | 'Heat' | 0 | - | 'Dwyane Wade' | 'Bulls' | 0 | - | 'Dwyane Wade' | 'Cavaliers' | 0 | - | 'Dwyane Wade' | 'Heat' | 1 | - | 'Joel Embiid' | '76ers' | 0 | - | 'Damian Lillard' | 'Trail Blazers' | 0 | - | 'Yao Ming' | 'Rockets' | 0 | - | 'Kyle Anderson' | 'Spurs' | 0 | - | 'Kyle Anderson' | 'Grizzlies' | 0 | - | 'Dejounte Murray' | 'Spurs' | 0 | - | 'Blake Griffin' | 'Clippers' | 0 | - | 'Blake Griffin' | 'Pistons' | 0 | - | 'Steve Nash' | 'Suns' | 0 | - | 'Steve Nash' | 'Mavericks' | 0 | - | 'Steve Nash' | 'Suns' | 1 | - | 'Steve Nash' | 'Lakers' | 0 | - | 'Jason Kidd' | 'Mavericks' | 0 | - | 'Jason Kidd' 
| 'Suns' | 0 | - | 'Jason Kidd' | 'Nets' | 0 | - | 'Jason Kidd' | 'Mavericks' | 1 | - | 'Jason Kidd' | 'Knicks' | 0 | - | 'Dirk Nowitzki' | 'Mavericks' | 0 | - | 'Paul George' | 'Pacers' | 0 | - | 'Paul George' | 'Thunders' | 0 | - | 'Grant Hill' | 'Pistons' | 0 | - | 'Grant Hill' | 'Magic' | 0 | - | 'Grant Hill' | 'Suns' | 0 | - | 'Grant Hill' | 'Clippers' | 0 | - | "Shaquille O'Neal" | 'Magic' | 0 | - | "Shaquille O'Neal" | 'Lakers' | 0 | - | "Shaquille O'Neal" | 'Heat' | 0 | - | "Shaquille O'Neal" | 'Suns' | 0 | - | "Shaquille O'Neal" | 'Cavaliers' | 0 | - | "Shaquille O'Neal" | 'Celtics' | 0 | - | 'JaVale McGee' | 'Wizards' | 0 | - | 'JaVale McGee' | 'Nuggets' | 0 | - | 'JaVale McGee' | 'Mavericks' | 0 | - | 'JaVale McGee' | 'Warriors' | 0 | - | 'JaVale McGee' | 'Lakers' | 0 | - | 'Dwight Howard' | 'Magic' | 0 | - | 'Dwight Howard' | 'Lakers' | 0 | - | 'Dwight Howard' | 'Rockets' | 0 | - | 'Dwight Howard' | 'Hawks' | 0 | - | 'Dwight Howard' | 'Hornets' | 0 | - | 'Dwight Howard' | 'Wizards' | 0 | + | src | dst | rank | + | "Amar'e Stoudemire" | 'Suns' | 0 | + | "Amar'e Stoudemire" | 'Knicks' | 0 | + | "Amar'e Stoudemire" | 'Heat' | 0 | + | 'Russell Westbrook' | 'Thunders' | 0 | + | 'James Harden' | 'Thunders' | 0 | + | 'James Harden' | 'Rockets' | 0 | + | 'Kobe Bryant' | 'Lakers' | 0 | + | 'Tracy McGrady' | 'Raptors' | 0 | + | 'Tracy McGrady' | 'Magic' | 0 | + | 'Tracy McGrady' | 'Rockets' | 0 | + | 'Tracy McGrady' | 'Spurs' | 0 | + | 'Chris Paul' | 'Hornets' | 0 | + | 'Chris Paul' | 'Clippers' | 0 | + | 'Chris Paul' | 'Rockets' | 0 | + | 'Boris Diaw' | 'Hawks' | 0 | + | 'Boris Diaw' | 'Suns' | 0 | + | 'Boris Diaw' | 'Hornets' | 0 | + | 'Boris Diaw' | 'Spurs' | 0 | + | 'Boris Diaw' | 'Jazz' | 0 | + | 'LeBron James' | 'Cavaliers' | 0 | + | 'LeBron James' | 'Heat' | 0 | + | 'LeBron James' | 'Cavaliers' | 1 | + | 'LeBron James' | 'Lakers' | 0 | + | 'Klay Thompson' | 'Warriors' | 0 | + | 'Kristaps Porzingis' | 'Knicks' | 0 | + | 'Kristaps Porzingis' | 'Mavericks' | 
0 | + | 'Jonathon Simmons' | 'Spurs' | 0 | + | 'Jonathon Simmons' | 'Magic' | 0 | + | 'Jonathon Simmons' | '76ers' | 0 | + | 'Marco Belinelli' | 'Warriors' | 0 | + | 'Marco Belinelli' | 'Raptors' | 0 | + | 'Marco Belinelli' | 'Hornets' | 0 | + | 'Marco Belinelli' | 'Bulls' | 0 | + | 'Marco Belinelli' | 'Spurs' | 0 | + | 'Marco Belinelli' | 'Kings' | 0 | + | 'Marco Belinelli' | 'Hornets' | 1 | + | 'Marco Belinelli' | 'Hawks' | 0 | + | 'Marco Belinelli' | '76ers' | 0 | + | 'Marco Belinelli' | 'Spurs' | 1 | + | 'Luka Doncic' | 'Mavericks' | 0 | + | 'David West' | 'Hornets' | 0 | + | 'David West' | 'Pacers' | 0 | + | 'David West' | 'Spurs' | 0 | + | 'David West' | 'Warriors' | 0 | + | 'Tony Parker' | 'Spurs' | 0 | + | 'Tony Parker' | 'Hornets' | 0 | + | 'Danny Green' | 'Cavaliers' | 0 | + | 'Danny Green' | 'Spurs' | 0 | + | 'Danny Green' | 'Raptors' | 0 | + | 'Rudy Gay' | 'Grizzlies' | 0 | + | 'Rudy Gay' | 'Raptors' | 0 | + | 'Rudy Gay' | 'Kings' | 0 | + | 'Rudy Gay' | 'Spurs' | 0 | + | 'LaMarcus Aldridge' | 'Trail Blazers' | 0 | + | 'LaMarcus Aldridge' | 'Spurs' | 0 | + | 'Tim Duncan' | 'Spurs' | 0 | + | 'Kevin Durant' | 'Thunders' | 0 | + | 'Kevin Durant' | 'Warriors' | 0 | + | 'Stephen Curry' | 'Warriors' | 0 | + | 'Ray Allen' | 'Bucks' | 0 | + | 'Ray Allen' | 'Thunders' | 0 | + | 'Ray Allen' | 'Celtics' | 0 | + | 'Ray Allen' | 'Heat' | 0 | + | 'Tiago Splitter' | 'Spurs' | 0 | + | 'Tiago Splitter' | 'Hawks' | 0 | + | 'Tiago Splitter' | '76ers' | 0 | + | 'DeAndre Jordan' | 'Clippers' | 0 | + | 'DeAndre Jordan' | 'Mavericks' | 0 | + | 'DeAndre Jordan' | 'Knicks' | 0 | + | 'Paul Gasol' | 'Grizzlies' | 0 | + | 'Paul Gasol' | 'Lakers' | 0 | + | 'Paul Gasol' | 'Bulls' | 0 | + | 'Paul Gasol' | 'Spurs' | 0 | + | 'Paul Gasol' | 'Bucks' | 0 | + | 'Aron Baynes' | 'Spurs' | 0 | + | 'Aron Baynes' | 'Pistons' | 0 | + | 'Aron Baynes' | 'Celtics' | 0 | + | 'Cory Joseph' | 'Spurs' | 0 | + | 'Cory Joseph' | 'Raptors' | 0 | + | 'Cory Joseph' | 'Pacers' | 0 | + | 'Vince Carter' | 
'Raptors' | 0 | + | 'Vince Carter' | 'Nets' | 0 | + | 'Vince Carter' | 'Magic' | 0 | + | 'Vince Carter' | 'Suns' | 0 | + | 'Vince Carter' | 'Mavericks' | 0 | + | 'Vince Carter' | 'Grizzlies' | 0 | + | 'Vince Carter' | 'Kings' | 0 | + | 'Vince Carter' | 'Hawks' | 0 | + | 'Marc Gasol' | 'Grizzlies' | 0 | + | 'Marc Gasol' | 'Raptors' | 0 | + | 'Ricky Rubio' | 'Timberwolves' | 0 | + | 'Ricky Rubio' | 'Jazz' | 0 | + | 'Ben Simmons' | '76ers' | 0 | + | 'Giannis Antetokounmpo' | 'Bucks' | 0 | + | 'Rajon Rondo' | 'Celtics' | 0 | + | 'Rajon Rondo' | 'Mavericks' | 0 | + | 'Rajon Rondo' | 'Kings' | 0 | + | 'Rajon Rondo' | 'Bulls' | 0 | + | 'Rajon Rondo' | 'Pelicans' | 0 | + | 'Rajon Rondo' | 'Lakers' | 0 | + | 'Manu Ginobili' | 'Spurs' | 0 | + | 'Kyrie Irving' | 'Cavaliers' | 0 | + | 'Kyrie Irving' | 'Celtics' | 0 | + | 'Carmelo Anthony' | 'Nuggets' | 0 | + | 'Carmelo Anthony' | 'Knicks' | 0 | + | 'Carmelo Anthony' | 'Thunders' | 0 | + | 'Carmelo Anthony' | 'Rockets' | 0 | + | 'Dwyane Wade' | 'Heat' | 0 | + | 'Dwyane Wade' | 'Bulls' | 0 | + | 'Dwyane Wade' | 'Cavaliers' | 0 | + | 'Dwyane Wade' | 'Heat' | 1 | + | 'Joel Embiid' | '76ers' | 0 | + | 'Damian Lillard' | 'Trail Blazers' | 0 | + | 'Yao Ming' | 'Rockets' | 0 | + | 'Kyle Anderson' | 'Spurs' | 0 | + | 'Kyle Anderson' | 'Grizzlies' | 0 | + | 'Dejounte Murray' | 'Spurs' | 0 | + | 'Blake Griffin' | 'Clippers' | 0 | + | 'Blake Griffin' | 'Pistons' | 0 | + | 'Steve Nash' | 'Suns' | 0 | + | 'Steve Nash' | 'Mavericks' | 0 | + | 'Steve Nash' | 'Suns' | 1 | + | 'Steve Nash' | 'Lakers' | 0 | + | 'Jason Kidd' | 'Mavericks' | 0 | + | 'Jason Kidd' | 'Suns' | 0 | + | 'Jason Kidd' | 'Nets' | 0 | + | 'Jason Kidd' | 'Mavericks' | 1 | + | 'Jason Kidd' | 'Knicks' | 0 | + | 'Dirk Nowitzki' | 'Mavericks' | 0 | + | 'Paul George' | 'Pacers' | 0 | + | 'Paul George' | 'Thunders' | 0 | + | 'Grant Hill' | 'Pistons' | 0 | + | 'Grant Hill' | 'Magic' | 0 | + | 'Grant Hill' | 'Suns' | 0 | + | 'Grant Hill' | 'Clippers' | 0 | + | "Shaquille O'Neal" | 
'Magic' | 0 | + | "Shaquille O'Neal" | 'Lakers' | 0 | + | "Shaquille O'Neal" | 'Heat' | 0 | + | "Shaquille O'Neal" | 'Suns' | 0 | + | "Shaquille O'Neal" | 'Cavaliers' | 0 | + | "Shaquille O'Neal" | 'Celtics' | 0 | + | 'JaVale McGee' | 'Wizards' | 0 | + | 'JaVale McGee' | 'Nuggets' | 0 | + | 'JaVale McGee' | 'Mavericks' | 0 | + | 'JaVale McGee' | 'Warriors' | 0 | + | 'JaVale McGee' | 'Lakers' | 0 | + | 'Dwight Howard' | 'Magic' | 0 | + | 'Dwight Howard' | 'Lakers' | 0 | + | 'Dwight Howard' | 'Rockets' | 0 | + | 'Dwight Howard' | 'Hawks' | 0 | + | 'Dwight Howard' | 'Hornets' | 0 | + | 'Dwight Howard' | 'Wizards' | 0 | When executing query: """ LOOKUP ON serve YIELD serve.start_year AS startYear """ - Then the result should be, in any order, and the columns 0,1 should be hashed: - | SrcVID | DstVID | Ranking | startYear | - | "Amar'e Stoudemire" | 'Suns' | 0 | 2002 | - | "Amar'e Stoudemire" | 'Knicks' | 0 | 2010 | - | "Amar'e Stoudemire" | 'Heat' | 0 | 2015 | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'James Harden' | 'Thunders' | 0 | 2009 | - | 'James Harden' | 'Rockets' | 0 | 2012 | - | 'Kobe Bryant' | 'Lakers' | 0 | 1996 | - | 'Tracy McGrady' | 'Raptors' | 0 | 1997 | - | 'Tracy McGrady' | 'Magic' | 0 | 2000 | - | 'Tracy McGrady' | 'Rockets' | 0 | 2004 | - | 'Tracy McGrady' | 'Spurs' | 0 | 2013 | - | 'Chris Paul' | 'Hornets' | 0 | 2005 | - | 'Chris Paul' | 'Clippers' | 0 | 2011 | - | 'Chris Paul' | 'Rockets' | 0 | 2017 | - | 'Boris Diaw' | 'Hawks' | 0 | 2003 | - | 'Boris Diaw' | 'Suns' | 0 | 2005 | - | 'Boris Diaw' | 'Hornets' | 0 | 2008 | - | 'Boris Diaw' | 'Spurs' | 0 | 2012 | - | 'Boris Diaw' | 'Jazz' | 0 | 2016 | - | 'LeBron James' | 'Cavaliers' | 0 | 2003 | - | 'LeBron James' | 'Heat' | 0 | 2010 | - | 'LeBron James' | 'Cavaliers' | 1 | 2014 | - | 'LeBron James' | 'Lakers' | 0 | 2018 | - | 'Klay Thompson' | 'Warriors' | 0 | 2011 | - | 'Kristaps Porzingis' | 'Knicks' | 0 | 2015 | - | 'Kristaps Porzingis' | 'Mavericks' | 0 | 2019 | - | 'Jonathon 
Simmons' | 'Spurs' | 0 | 2015 | - | 'Jonathon Simmons' | 'Magic' | 0 | 2017 | - | 'Jonathon Simmons' | '76ers' | 0 | 2019 | - | 'Marco Belinelli' | 'Warriors' | 0 | 2007 | - | 'Marco Belinelli' | 'Raptors' | 0 | 2009 | - | 'Marco Belinelli' | 'Hornets' | 0 | 2010 | - | 'Marco Belinelli' | 'Bulls' | 0 | 2012 | - | 'Marco Belinelli' | 'Spurs' | 0 | 2013 | - | 'Marco Belinelli' | 'Kings' | 0 | 2015 | - | 'Marco Belinelli' | 'Hornets' | 1 | 2016 | - | 'Marco Belinelli' | 'Hawks' | 0 | 2017 | - | 'Marco Belinelli' | '76ers' | 0 | 2018 | - | 'Marco Belinelli' | 'Spurs' | 1 | 2018 | - | 'Luka Doncic' | 'Mavericks' | 0 | 2018 | - | 'David West' | 'Hornets' | 0 | 2003 | - | 'David West' | 'Pacers' | 0 | 2011 | - | 'David West' | 'Spurs' | 0 | 2015 | - | 'David West' | 'Warriors' | 0 | 2016 | - | 'Tony Parker' | 'Spurs' | 0 | 1999 | - | 'Tony Parker' | 'Hornets' | 0 | 2018 | - | 'Danny Green' | 'Cavaliers' | 0 | 2009 | - | 'Danny Green' | 'Spurs' | 0 | 2010 | - | 'Danny Green' | 'Raptors' | 0 | 2018 | - | 'Rudy Gay' | 'Grizzlies' | 0 | 2006 | - | 'Rudy Gay' | 'Raptors' | 0 | 2013 | - | 'Rudy Gay' | 'Kings' | 0 | 2013 | - | 'Rudy Gay' | 'Spurs' | 0 | 2017 | - | 'LaMarcus Aldridge' | 'Trail Blazers' | 0 | 2006 | - | 'LaMarcus Aldridge' | 'Spurs' | 0 | 2015 | - | 'Tim Duncan' | 'Spurs' | 0 | 1997 | - | 'Kevin Durant' | 'Thunders' | 0 | 2007 | - | 'Kevin Durant' | 'Warriors' | 0 | 2016 | - | 'Stephen Curry' | 'Warriors' | 0 | 2009 | - | 'Ray Allen' | 'Bucks' | 0 | 1996 | - | 'Ray Allen' | 'Thunders' | 0 | 2003 | - | 'Ray Allen' | 'Celtics' | 0 | 2007 | - | 'Ray Allen' | 'Heat' | 0 | 2012 | - | 'Tiago Splitter' | 'Spurs' | 0 | 2010 | - | 'Tiago Splitter' | 'Hawks' | 0 | 2015 | - | 'Tiago Splitter' | '76ers' | 0 | 2017 | - | 'DeAndre Jordan' | 'Clippers' | 0 | 2008 | - | 'DeAndre Jordan' | 'Mavericks' | 0 | 2018 | - | 'DeAndre Jordan' | 'Knicks' | 0 | 2019 | - | 'Paul Gasol' | 'Grizzlies' | 0 | 2001 | - | 'Paul Gasol' | 'Lakers' | 0 | 2008 | - | 'Paul Gasol' | 'Bulls' | 0 | 2014 | 
- | 'Paul Gasol' | 'Spurs' | 0 | 2016 | - | 'Paul Gasol' | 'Bucks' | 0 | 2019 | - | 'Aron Baynes' | 'Spurs' | 0 | 2013 | - | 'Aron Baynes' | 'Pistons' | 0 | 2015 | - | 'Aron Baynes' | 'Celtics' | 0 | 2017 | - | 'Cory Joseph' | 'Spurs' | 0 | 2011 | - | 'Cory Joseph' | 'Raptors' | 0 | 2015 | - | 'Cory Joseph' | 'Pacers' | 0 | 2017 | - | 'Vince Carter' | 'Raptors' | 0 | 1998 | - | 'Vince Carter' | 'Nets' | 0 | 2004 | - | 'Vince Carter' | 'Magic' | 0 | 2009 | - | 'Vince Carter' | 'Suns' | 0 | 2010 | - | 'Vince Carter' | 'Mavericks' | 0 | 2011 | - | 'Vince Carter' | 'Grizzlies' | 0 | 2014 | - | 'Vince Carter' | 'Kings' | 0 | 2017 | - | 'Vince Carter' | 'Hawks' | 0 | 2018 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | - | 'Marc Gasol' | 'Raptors' | 0 | 2019 | - | 'Ricky Rubio' | 'Timberwolves' | 0 | 2011 | - | 'Ricky Rubio' | 'Jazz' | 0 | 2017 | - | 'Ben Simmons' | '76ers' | 0 | 2016 | - | 'Giannis Antetokounmpo' | 'Bucks' | 0 | 2013 | - | 'Rajon Rondo' | 'Celtics' | 0 | 2006 | - | 'Rajon Rondo' | 'Mavericks' | 0 | 2014 | - | 'Rajon Rondo' | 'Kings' | 0 | 2015 | - | 'Rajon Rondo' | 'Bulls' | 0 | 2016 | - | 'Rajon Rondo' | 'Pelicans' | 0 | 2017 | - | 'Rajon Rondo' | 'Lakers' | 0 | 2018 | - | 'Manu Ginobili' | 'Spurs' | 0 | 2002 | - | 'Kyrie Irving' | 'Cavaliers' | 0 | 2011 | - | 'Kyrie Irving' | 'Celtics' | 0 | 2017 | - | 'Carmelo Anthony' | 'Nuggets' | 0 | 2003 | - | 'Carmelo Anthony' | 'Knicks' | 0 | 2011 | - | 'Carmelo Anthony' | 'Thunders' | 0 | 2017 | - | 'Carmelo Anthony' | 'Rockets' | 0 | 2018 | - | 'Dwyane Wade' | 'Heat' | 0 | 2003 | - | 'Dwyane Wade' | 'Bulls' | 0 | 2016 | - | 'Dwyane Wade' | 'Cavaliers' | 0 | 2017 | - | 'Dwyane Wade' | 'Heat' | 1 | 2018 | - | 'Joel Embiid' | '76ers' | 0 | 2014 | - | 'Damian Lillard' | 'Trail Blazers' | 0 | 2012 | - | 'Yao Ming' | 'Rockets' | 0 | 2002 | - | 'Kyle Anderson' | 'Spurs' | 0 | 2014 | - | 'Kyle Anderson' | 'Grizzlies' | 0 | 2018 | - | 'Dejounte Murray' | 'Spurs' | 0 | 2016 | - | 'Blake Griffin' | 'Clippers' | 0 | 2009 | 
- | 'Blake Griffin' | 'Pistons' | 0 | 2018 | - | 'Steve Nash' | 'Suns' | 0 | 1996 | - | 'Steve Nash' | 'Mavericks' | 0 | 1998 | - | 'Steve Nash' | 'Suns' | 1 | 2004 | - | 'Steve Nash' | 'Lakers' | 0 | 2012 | - | 'Jason Kidd' | 'Mavericks' | 0 | 1994 | - | 'Jason Kidd' | 'Suns' | 0 | 1996 | - | 'Jason Kidd' | 'Nets' | 0 | 2001 | - | 'Jason Kidd' | 'Mavericks' | 1 | 2008 | - | 'Jason Kidd' | 'Knicks' | 0 | 2012 | - | 'Dirk Nowitzki' | 'Mavericks' | 0 | 1998 | - | 'Paul George' | 'Pacers' | 0 | 2010 | - | 'Paul George' | 'Thunders' | 0 | 2017 | - | 'Grant Hill' | 'Pistons' | 0 | 1994 | - | 'Grant Hill' | 'Magic' | 0 | 2000 | - | 'Grant Hill' | 'Suns' | 0 | 2007 | - | 'Grant Hill' | 'Clippers' | 0 | 2012 | - | "Shaquille O'Neal" | 'Magic' | 0 | 1992 | - | "Shaquille O'Neal" | 'Lakers' | 0 | 1996 | - | "Shaquille O'Neal" | 'Heat' | 0 | 2004 | - | "Shaquille O'Neal" | 'Suns' | 0 | 2008 | - | "Shaquille O'Neal" | 'Cavaliers' | 0 | 2009 | - | "Shaquille O'Neal" | 'Celtics' | 0 | 2010 | - | 'JaVale McGee' | 'Wizards' | 0 | 2008 | - | 'JaVale McGee' | 'Nuggets' | 0 | 2012 | - | 'JaVale McGee' | 'Mavericks' | 0 | 2015 | - | 'JaVale McGee' | 'Warriors' | 0 | 2016 | - | 'JaVale McGee' | 'Lakers' | 0 | 2018 | - | 'Dwight Howard' | 'Magic' | 0 | 2004 | - | 'Dwight Howard' | 'Lakers' | 0 | 2012 | - | 'Dwight Howard' | 'Rockets' | 0 | 2013 | - | 'Dwight Howard' | 'Hawks' | 0 | 2016 | - | 'Dwight Howard' | 'Hornets' | 0 | 2017 | - | 'Dwight Howard' | 'Wizards' | 0 | 2018 | + Then the result should be, in any order: + | startYear | + | 2002 | + | 2010 | + | 2015 | + | 2008 | + | 2009 | + | 2012 | + | 1996 | + | 1997 | + | 2000 | + | 2004 | + | 2013 | + | 2005 | + | 2011 | + | 2017 | + | 2003 | + | 2005 | + | 2008 | + | 2012 | + | 2016 | + | 2003 | + | 2010 | + | 2014 | + | 2018 | + | 2011 | + | 2015 | + | 2019 | + | 2015 | + | 2017 | + | 2019 | + | 2007 | + | 2009 | + | 2010 | + | 2012 | + | 2013 | + | 2015 | + | 2016 | + | 2017 | + | 2018 | + | 2018 | + | 2018 | + | 2003 | + | 2011 
| + | 2015 | + | 2016 | + | 1999 | + | 2018 | + | 2009 | + | 2010 | + | 2018 | + | 2006 | + | 2013 | + | 2013 | + | 2017 | + | 2006 | + | 2015 | + | 1997 | + | 2007 | + | 2016 | + | 2009 | + | 1996 | + | 2003 | + | 2007 | + | 2012 | + | 2010 | + | 2015 | + | 2017 | + | 2008 | + | 2018 | + | 2019 | + | 2001 | + | 2008 | + | 2014 | + | 2016 | + | 2019 | + | 2013 | + | 2015 | + | 2017 | + | 2011 | + | 2015 | + | 2017 | + | 1998 | + | 2004 | + | 2009 | + | 2010 | + | 2011 | + | 2014 | + | 2017 | + | 2018 | + | 2008 | + | 2019 | + | 2011 | + | 2017 | + | 2016 | + | 2013 | + | 2006 | + | 2014 | + | 2015 | + | 2016 | + | 2017 | + | 2018 | + | 2002 | + | 2011 | + | 2017 | + | 2003 | + | 2011 | + | 2017 | + | 2018 | + | 2003 | + | 2016 | + | 2017 | + | 2018 | + | 2014 | + | 2012 | + | 2002 | + | 2014 | + | 2018 | + | 2016 | + | 2009 | + | 2018 | + | 1996 | + | 1998 | + | 2004 | + | 2012 | + | 1994 | + | 1996 | + | 2001 | + | 2008 | + | 2012 | + | 1998 | + | 2010 | + | 2017 | + | 1994 | + | 2000 | + | 2007 | + | 2012 | + | 1992 | + | 1996 | + | 2004 | + | 2008 | + | 2009 | + | 2010 | + | 2008 | + | 2012 | + | 2015 | + | 2016 | + | 2018 | + | 2004 | + | 2012 | + | 2013 | + | 2016 | + | 2017 | + | 2018 | Scenario: [2] Edge TODO Given a graph with space named "nba_int_vid" When executing query: """ - LOOKUP ON serve WHERE 1 + 1 == 2 + LOOKUP ON serve WHERE 1 + 1 == 2 YIELD serve.start_year """ Then a SemanticError should be raised at runtime: When executing query: @@ -434,7 +434,7 @@ Feature: Lookup by index itself in integer vid Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON serve WHERE serve.start_year == serve.end_year + LOOKUP ON serve WHERE serve.start_year == serve.end_year YIELD serve.start_year """ Then a SemanticError should be raised at runtime: When executing query: @@ -447,18 +447,18 @@ Feature: Lookup by index itself in integer vid Given a graph with space named "nba_int_vid" When executing query: """ - LOOKUP ON player 
WHERE player.age == 40 YIELD player.age AS Age + LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | + | name | Age | | "Dirk Nowitzki" | 40 | | "Kobe Bryant" | 40 | When executing query: """ - LOOKUP ON player WHERE player.age > 40 YIELD player.age AS Age + LOOKUP ON player WHERE player.age > 40 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -469,10 +469,10 @@ Feature: Lookup by index itself in integer vid | "Vince Carter" | 42 | When executing query: """ - LOOKUP ON player WHERE player.age >= 40.0 YIELD player.age AS Age + LOOKUP ON player WHERE player.age >= 40.0 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -485,10 +485,10 @@ Feature: Lookup by index itself in integer vid | "Kobe Bryant" | 40 | When executing query: """ - LOOKUP ON player WHERE player.age > 40.5 YIELD player.age AS Age + LOOKUP ON player WHERE player.age > 40.5 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | + | name | Age | | "Grant Hill" | 46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -499,10 +499,10 @@ Feature: Lookup by index itself in integer vid | "Vince Carter" | 42 | When executing query: """ - LOOKUP ON player WHERE player.age >= 40.5 YIELD player.age AS Age + LOOKUP ON player WHERE player.age >= 40.5 YIELD id(vertex) as name, player.age AS Age """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | Age | + | name | Age | | "Grant Hill" | 
46 | | "Jason Kidd" | 45 | | "Manu Ginobili" | 41 | @@ -516,35 +516,35 @@ Feature: Lookup by index itself in integer vid LOOKUP ON player WHERE player.age < 40 YIELD player.age AS Age, player.name AS Name | order by $-.Age DESC, $-.Name| limit 10 """ - Then the result should be, in order, with relax comparison, and the columns 0 should be hashed: - | VertexID | Age | Name | - | "Tracy McGrady" | 39 | "Tracy McGrady" | - | "David West" | 38 | "David West" | - | "Paul Gasol" | 38 | "Paul Gasol" | - | "Yao Ming" | 38 | "Yao Ming" | - | "Dwyane Wade" | 37 | "Dwyane Wade" | - | "Amar'e Stoudemire" | 36 | "Amar'e Stoudemire" | - | "Boris Diaw" | 36 | "Boris Diaw" | - | "Tony Parker" | 36 | "Tony Parker" | - | "Carmelo Anthony" | 34 | "Carmelo Anthony" | - | "LeBron James" | 34 | "LeBron James" | + Then the result should be, in order, with relax comparison: + | Age | Name | + | 39 | "Tracy McGrady" | + | 38 | "David West" | + | 38 | "Paul Gasol" | + | 38 | "Yao Ming" | + | 37 | "Dwyane Wade" | + | 36 | "Amar'e Stoudemire" | + | 36 | "Boris Diaw" | + | 36 | "Tony Parker" | + | 34 | "Carmelo Anthony" | + | 34 | "LeBron James" | When executing query: """ LOOKUP ON player WHERE player.age <= 40 YIELD player.age AS Age, player.name AS Name | order by $-.Age DESC, $-.Name| limit 10 """ - Then the result should be, in order, with relax comparison, and the columns 0 should be hashed: - | VertexID | Age | Name | - | "Dirk Nowitzki" | 40 | "Dirk Nowitzki" | - | "Kobe Bryant" | 40 | "Kobe Bryant" | - | "Tracy McGrady" | 39 | "Tracy McGrady" | - | "David West" | 38 | "David West" | - | "Paul Gasol" | 38 | "Paul Gasol" | - | "Yao Ming" | 38 | "Yao Ming" | - | "Dwyane Wade" | 37 | "Dwyane Wade" | - | "Amar'e Stoudemire" | 36 | "Amar'e Stoudemire" | - | "Boris Diaw" | 36 | "Boris Diaw" | - | "Tony Parker" | 36 | "Tony Parker" | + Then the result should be, in order, with relax comparison: + | Age | Name | + | 40 | "Dirk Nowitzki" | + | 40 | "Kobe Bryant" | + | 39 | "Tracy McGrady" | + | 
38 | "David West" | + | 38 | "Paul Gasol" | + | 38 | "Yao Ming" | + | 37 | "Dwyane Wade" | + | 36 | "Amar'e Stoudemire" | + | 36 | "Boris Diaw" | + | 36 | "Tony Parker" | Scenario: [2] Compare INT and FLOAT during IndexScan Given an empty graph @@ -562,26 +562,26 @@ Feature: Lookup by index itself in integer vid """ When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT > 70; + LOOKUP ON weight WHERE weight.WEIGHT > 70 YIELD id(vertex) as name; """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT > 70.4; + LOOKUP ON weight WHERE weight.WEIGHT > 70.4 YIELD id(vertex) as name; """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | When executing query: """ - LOOKUP ON weight WHERE weight.WEIGHT >= 70.5; + LOOKUP ON weight WHERE weight.WEIGHT >= 70.5 YIELD id(vertex) as name; """ Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | + | name | | "Tim Duncan" | | "Tony Parker" | Then drop the used space diff --git a/tests/tck/features/lookup/EdgeIndexFullScan.feature b/tests/tck/features/lookup/EdgeIndexFullScan.feature index d5aae560e51..d308368b21e 100644 --- a/tests/tck/features/lookup/EdgeIndexFullScan.feature +++ b/tests/tck/features/lookup/EdgeIndexFullScan.feature @@ -43,12 +43,12 @@ Feature: Lookup edge index full scan Scenario: Edge with relational NE filter When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col1_str != "Yellow" YIELD edge_1.col1_str + LOOKUP ON edge_1 WHERE edge_1.col1_str != "Yellow" YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | - | "101" | "102" | 0 | "Red1" | - | "103" | "101" | 0 | "Blue" | + | src | dst | rank | 
edge_1.col1_str | + | "101" | "102" | 0 | "Red1" | + | "103" | "101" | 0 | "Blue" | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -57,12 +57,12 @@ Feature: Lookup edge index full scan | 0 | Start | | | When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col2_int != 11 YIELD edge_1.col2_int + LOOKUP ON edge_1 WHERE edge_1.col2_int != 11 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2_int | - | "103" | "101" | 0 | 33 | - | "102" | "103" | 0 | 22 | + | src | dst | rank | edge_1.col2_int | + | "103" | "101" | 0 | 33 | + | "102" | "103" | 0 | 22 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -73,11 +73,11 @@ Feature: Lookup edge index full scan Scenario: Edge with simple relational IN filter When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col1_str IN ["Red", "Yellow"] YIELD edge_1.col1_str + LOOKUP ON edge_1 WHERE edge_1.col1_str IN ["Red", "Yellow"] YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | - | "102" | "103" | 0 | "Yellow" | + | src | dst | rank | edge_1.col1_str | + | "102" | "103" | 0 | "Yellow" | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -88,15 +88,15 @@ Feature: Lookup edge index full scan LOOKUP ON edge_1 WHERE edge_1.col1_str IN ["non-existed-name"] YIELD edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | + | edge_1.col1_str | When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col2_int IN [23 - 1 , 66/2] YIELD edge_1.col2_int + LOOKUP ON edge_1 WHERE edge_1.col2_int IN [23 - 1 , 66/2] YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col2_int 
""" Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2_int | - | "103" | "101" | 0 | 33 | - | "102" | "103" | 0 | 22 | + | src | dst | rank | edge_1.col2_int | + | "103" | "101" | 0 | 33 | + | "102" | "103" | 0 | 22 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -107,13 +107,13 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [23 - 1 , 66/2] OR edge_1.col2_int==11 - YIELD edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2_int | - | "101" | "102" | 0 | 11 | - | "102" | "103" | 0 | 22 | - | "103" | "101" | 0 | 33 | + | src | dst | rank | edge_1.col2_int | + | "101" | "102" | 0 | 11 | + | "102" | "103" | 0 | 22 | + | "103" | "101" | 0 | 33 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -124,13 +124,13 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [23 - 1 , 66/2] OR edge_1.col1_str IN [toUpper("r")+"ed1"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | - | "102" | "103" | 0 | "Yellow" | 22 | - | "103" | "101" | 0 | "Blue" | 33 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | + | "102" | "103" | 0 | "Yellow" | 22 | + | "103" | "101" | 0 | "Blue" | 33 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -141,11 +141,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [11 , 66/2] AND edge_1.col2_int==11 - YIELD edge_1.col2_int + YIELD 
src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2_int | - | "101" | "102" | 0 | 11 | + | src | dst | rank | edge_1.col2_int | + | "101" | "102" | 0 | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -160,11 +160,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [11 , 33] AND edge_1.col1_str IN ["Red1"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -176,11 +176,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [11 , 33] AND edge_1.col1_str IN ["Red1", "ABC"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -203,11 +203,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [11 , 33] AND edge_1.col1_str IN ["Red1", "ABC"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID 
| Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -231,11 +231,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col1_str IN ["Red1", "ABC"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -246,11 +246,11 @@ Feature: Lookup edge index full scan """ LOOKUP ON edge_1 WHERE edge_1.col2_int IN [11 , 33] AND edge_1.col1_str IN ["Red1", "ABC"] - YIELD edge_1.col1_str, edge_1.col2_int + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | edge_1.col2_int | - | "101" | "102" | 0 | "Red1" | 11 | + | src | dst | rank | edge_1.col1_str | edge_1.col2_int | + | "101" | "102" | 0 | "Red1" | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 4 | | @@ -260,12 +260,12 @@ Feature: Lookup edge index full scan Scenario: Edge with relational NOT IN filter When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col1_str NOT IN ["Blue"] YIELD edge_1.col1_str + LOOKUP ON edge_1 WHERE edge_1.col1_str NOT IN ["Blue"] YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | - | "101" | "102" | 0 | 
"Red1" | - | "102" | "103" | 0 | "Yellow" | + | src | dst | rank | edge_1.col1_str | + | "101" | "102" | 0 | "Red1" | + | "102" | "103" | 0 | "Yellow" | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -274,11 +274,11 @@ Feature: Lookup edge index full scan | 0 | Start | | | When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col2_int NOT IN [23 - 1 , 66/2] YIELD edge_1.col2_int + LOOKUP ON edge_1 WHERE edge_1.col2_int NOT IN [23 - 1 , 66/2] YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col2_int """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col2_int | - | "101" | "102" | 0 | 11 | + | src | dst | rank | edge_1.col2_int | + | "101" | "102" | 0 | 11 | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -301,11 +301,11 @@ Feature: Lookup edge index full scan Scenario: Edge with relational STARTS/NOT STARTS WITH filter When profiling query: """ - LOOKUP ON edge_1 WHERE edge_1.col1_str STARTS WITH toUpper("r") YIELD edge_1.col1_str + LOOKUP ON edge_1 WHERE edge_1.col1_str STARTS WITH toUpper("r") YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | - | "101" | "102" | 0 | "Red1" | + | src | dst | rank | edge_1.col1_str | + | "101" | "102" | 0 | "Red1" | And the execution plan should be: | id | name | dependencies | operator info | | 3 | Project | 2 | | @@ -317,7 +317,7 @@ Feature: Lookup edge index full scan LOOKUP ON edge_1 WHERE edge_1.col1_str STARTS WITH "ABC" YIELD edge_1.col1_str """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | edge_1.col1_str | + | edge_1.col1_str | When executing query: """ LOOKUP ON edge_1 WHERE edge_1.col1_str STARTS WITH 123 YIELD edge_1.col1_str diff --git a/tests/tck/features/lookup/LookUp.IntVid.feature 
b/tests/tck/features/lookup/LookUp.IntVid.feature index 3628a5cd477..e7f8cff46e7 100644 --- a/tests/tck/features/lookup/LookUp.IntVid.feature +++ b/tests/tck/features/lookup/LookUp.IntVid.feature @@ -33,17 +33,17 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col2 == 200 + LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 200 | + | id | + | 200 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col1 == true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col1 == true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: LookupTest IntVid EdgeIndexHint @@ -68,14 +68,14 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col2 == 201 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col2 == 201 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 200 | 201 | 0 | + | src | dst | rank | + | 200 | 201 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col1 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col1 == 200 YIELD edge as e """ Then a SemanticError should be raised at runtime: Then drop the used space @@ -104,131 +104,131 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | + | id | + | 220 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 OR lookup_tag_2.col2 == 200 + LOOKUP ON lookup_tag_2 WHERE 
lookup_tag_2.col2 == 100 OR lookup_tag_2.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | + | id | + | 220 | + | 221 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 == true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 == true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 != true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 != true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >= 100 AND lookup_tag_2.col2 <= 400 + 
LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >= 100 AND lookup_tag_2.col2 <= 400 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | + | id | + | 220 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 200 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 > 100.5 - YIELD lookup_tag_2.col3 AS col3 + YIELD id(vertex) as id, lookup_tag_2.col3 AS col3 """ Then the result should be, in any order: - | VertexID | col3 | - | 221 | 200.5 | - | 222 | 300.5 | - | 223 | 400.5 | - | 224 | 500.5 | - | 225 | 600.5 | + | id | col3 | + | 221 | 200.5 | + | 222 | 300.5 | + | 223 | 400.5 | + | 224 | 500.5 | + | 225 | 600.5 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | + | id | + | 220 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.1 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.1 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 >= 100.5 AND lookup_tag_2.col3 <= 300.5 - YIELD lookup_tag_2.col3 AS col3 + YIELD id(vertex) as id, lookup_tag_2.col3 AS col3 """ Then the result 
should be, in any order: - | VertexID | col3 | - | 220 | 100.5 | - | 221 | 200.5 | - | 222 | 300.5 | + | id | col3 | + | 220 | 100.5 | + | 221 | 200.5 | + | 222 | 300.5 | Then drop the used space Scenario: LookupTest IntVid EdgeConditionScan @@ -253,90 +253,90 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 OR lookup_edge_2.col2 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 OR lookup_edge_2.col2 == 200 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | - | 220 | 222 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | + | 220 | 222 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 > 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 > 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 222 | 0 | - | 220 | 223 | 0 | - | 220 | 224 | 0 | - | 220 | 225 | 0 | + | src | dst | rank | + | 220 | 222 | 0 | + | 220 | 223 | 0 | + | 220 | 224 | 0 | + | 220 | 225 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 != 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 != 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 222 | 0 | - | 220 | 223 | 0 | - | 220 | 224 | 0 | - | 220 | 225 | 0 | + | src | dst | rank | + | 220 | 222 | 0 | + | 220 | 223 | 0 | 
+ | 220 | 224 | 0 | + | 220 | 225 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | - | 220 | 222 | 0 | - | 220 | 223 | 0 | - | 220 | 224 | 0 | - | 220 | 225 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | + | 220 | 222 | 0 | + | 220 | 223 | 0 | + | 220 | 224 | 0 | + | 220 | 225 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 == true + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 == true YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | - | 220 | 222 | 0 | - | 220 | 223 | 0 | - | 220 | 224 | 0 | - | 220 | 225 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | + | 220 | 222 | 0 | + | 220 | 223 | 0 | + | 220 | 224 | 0 | + | 220 | 225 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 != true + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 != true YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col2 <= 400 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col2 <= 400 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | - | 220 | 222 | 0 | - | 220 | 223 | 0 | - | 220 | 224 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | + | 220 | 222 | 0 | + | 220 | 223 | 0 | + | 220 | 224 | 0 | 
When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 200 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ LOOKUP ON lookup_edge_2 @@ -344,24 +344,24 @@ Feature: LookUpTest_Vid_Int YIELD lookup_edge_2.col3 AS col3 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | col3 | - | 220 | 222 | 0 | 200.5 | - | 220 | 223 | 0 | 300.5 | - | 220 | 224 | 0 | 400.5 | - | 220 | 225 | 0 | 500.5 | + | col3 | + | 200.5 | + | 300.5 | + | 400.5 | + | 500.5 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 220 | 221 | 0 | + | src | dst | rank | + | 220 | 221 | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.1 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.1 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ LOOKUP ON lookup_edge_2 @@ -369,10 +369,10 @@ Feature: LookUpTest_Vid_Int YIELD lookup_edge_2.col3 AS col3 """ Then the result should be, in any order: - | 
SrcVID | DstVID | Ranking | col3 | - | 220 | 221 | 0 | 100.5 | - | 220 | 222 | 0 | 200.5 | - | 220 | 223 | 0 | 300.5 | + | col3 | + | 100.5 | + | 200.5 | + | 300.5 | Then drop the used space Scenario: LookupTest IntVid FunctionExprTest @@ -399,85 +399,85 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_2 WHERE 1 == 1 + LOOKUP ON lookup_tag_2 WHERE 1 == 1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE 1 != 1 + LOOKUP ON lookup_tag_2 WHERE 1 != 1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE udf_is_in(lookup_tag_2.col2, 100, 200) + LOOKUP ON lookup_tag_2 WHERE udf_is_in(lookup_tag_2.col2, 100, 200) YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > abs(-5) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > abs(-5) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < abs(-5) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < abs(-5) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > (1 + 2) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > (1 + 2) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < (1 + 2) + LOOKUP ON lookup_tag_2 WHERE 
lookup_tag_2.col2 < (1 + 2) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 != (true and true) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 != (true and true) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true and true) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true and true) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true or false) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true or false) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 220 | - | 221 | - | 222 | - | 223 | - | 224 | - | 225 | + | id | + | 220 | + | 221 | + | 222 | + | 223 | + | 224 | + | 225 | # FIXME(aiee): should support later by folding constants # When executing query: # """ @@ -493,7 +493,7 @@ Feature: LookUpTest_Vid_Int # | VertexID | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != lookup_tag_2.col3 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != lookup_tag_2.col3 YIELD id(vertex) as id """ Then a SemanticError should be raised at runtime: # FIXME(aiee): should support later @@ -549,11 +549,11 @@ Feature: LookUpTest_Vid_Int Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON student WHERE student.number == 1 YIELD student.age + LOOKUP ON student WHERE student.number == 1 YIELD id(vertex) as id, student.age """ Then the result should be, in any order: - | VertexID | student.age | - | 220 | 20 | + | id | student.age | + | 220 | 20 | Then drop the used space Scenario: LookupTest 
IntVid OptimizerTest @@ -590,52 +590,52 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c1 == 1 + LOOKUP ON t1 where t1.c1 == 1 YIELD t1.c2 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c1 == 1 and t1.c2 > 1 + LOOKUP ON t1 where t1.c1 == 1 and t1.c2 > 1 YIELD t1.c3 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c1 > 1 and t1.c2 == 1 + LOOKUP ON t1 where t1.c1 > 1 and t1.c2 == 1 YIELD t1.c2 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c1 > 1 and t1.c2 == 1 and t1.c3 == 1 + LOOKUP ON t1 where t1.c1 > 1 and t1.c2 == 1 and t1.c3 == 1 YIELD t1.c1 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c3 > 1 + LOOKUP ON t1 where t1.c3 > 1 YIELD id(vertex) """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c3 > 1 and t1.c1 >1 + LOOKUP ON t1 where t1.c3 > 1 and t1.c1 >1 YIELD id(vertex) """ Then the execution should be successful When executing query: """ - LOOKUP on t1 WHERE t1.c4 > 1 + LOOKUP on t1 WHERE t1.c4 > 1 YIELD t1.c4 """ Then the execution should be successful When executing query: """ - LOOKUP on t1 WHERE t1.c2 > 1 and t1.c3 > 1 + LOOKUP on t1 WHERE t1.c2 > 1 and t1.c3 > 1 YIELD t1.c4 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c2 > 1 and t1.c1 != 1 + LOOKUP ON t1 where t1.c2 > 1 and t1.c1 != 1 YIELD t1.c1 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 where t1.c2 != 1 + LOOKUP ON t1 where t1.c2 != 1 YIELD t1.c2 """ Then the execution should be successful Then drop the used space @@ -674,42 +674,42 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c1 == 1 + LOOKUP on t1_str WHERE t1_str.c1 == 1 YIELD 
id(vertex) """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c1 == 1 and t1_str.c2 >1 + LOOKUP on t1_str WHERE t1_str.c1 == 1 and t1_str.c2 >1 YIELD t1_str.c2 """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c3 == "a" + LOOKUP on t1_str WHERE t1_str.c3 == "a" YIELD t1_str.c3 """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c4 == "a" + LOOKUP on t1_str WHERE t1_str.c4 == "a" YIELD t1_str.c4 """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c4 == "a" + LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c4 == "a" YIELD t1_str.c1 """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c1 == 1 + LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c1 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c2 == 1 and t1_str.c1 == 1 + LOOKUP on t1_str WHERE t1_str.c3 == "a" and t1_str.c2 == 1 and t1_str.c1 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP on t1_str WHERE t1_str.c4 == "a" and t1_str.c3 == "a" and t1_str.c2 == 1 and t1_str.c1 == 1 + LOOKUP on t1_str WHERE t1_str.c4 == "a" and t1_str.c3 == "a" and t1_str.c2 == 1 and t1_str.c1 == 1 YIELD id(vertex) """ Then the execution should be successful Then drop the used space @@ -751,46 +751,46 @@ Feature: LookUpTest_Vid_Int Then the execution should be successful When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 1 | + | id | + | 1 | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 and 
tag_with_str.c2 == "ccc" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 and tag_with_str.c2 == "ccc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 and tag_with_str.c2 == "c1_row1" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 and tag_with_str.c2 == "c1_row1" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 1 | + | id | + | 1 | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 and tag_with_str.c2 == "ab" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 and tag_with_str.c2 == "ab" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 5 | + | id | + | 5 | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c2 == "abc" and tag_with_str.c3 == "abc" + LOOKUP ON tag_with_str WHERE tag_with_str.c2 == "abc" and tag_with_str.c3 == "abc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 3 | - | 4 | + | id | + | 3 | + | 4 | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 and tag_with_str.c2 == "abca" and tag_with_str.c3 == "bc" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 and tag_with_str.c2 == "abca" and tag_with_str.c3 == "bc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 6 | + | id | + | 6 | Then drop the used space Scenario: LookupTest IntVid ConditionTest @@ -821,10 +821,12 @@ Feature: LookUpTest_Vid_Int identity.NATION == "汉族" AND identity.BIRTHDAY > 19620101 AND identity.BIRTHDAY < 20021231 AND - identity.BIRTHPLACE_CITY == "bbb"; + identity.BIRTHPLACE_CITY == "bbb" + YIELD + id(vertex) as id; """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: LookupTest no index to use at runtime @@ -840,7 +842,7 @@ Feature: LookUpTest_Vid_Int Then the execution should 
be successful When executing query: """ - LOOKUP ON player WHERE player.name == 'Tim' + LOOKUP ON player WHERE player.name == 'Tim' YIELD vertex as node """ Then an ExecutionError should be raised at runtime: There is no index to use at runtime Then drop the used space diff --git a/tests/tck/features/lookup/LookUp.feature b/tests/tck/features/lookup/LookUp.feature index ce6700e6697..9c60ab73a9e 100644 --- a/tests/tck/features/lookup/LookUp.feature +++ b/tests/tck/features/lookup/LookUp.feature @@ -31,17 +31,17 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col2 == 200 + LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "200" | + | id | + | "200" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col1 == true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col1 == true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: LookupTest EdgeIndexHint @@ -66,14 +66,14 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col2 == 201 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col2 == 201 YIELD src(edge) as src, dst(Edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "200" | "201" | 0 | + | src | dst | rank | + | "200" | "201" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col1 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col1 == 200 YIELD edge as e """ Then a SemanticError should be raised at runtime: Then drop the used space @@ -102,131 +102,131 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 + 
LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | + | id | + | "220" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 OR lookup_tag_2.col2 == 200 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 == 100 OR lookup_tag_2.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | + | id | + | "220" | + | "221" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 == true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 == true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | 
"225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 != true + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >=100 AND lookup_tag_2.col4 != true YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >= 100 AND lookup_tag_2.col2 <= 400 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 >= 100 AND lookup_tag_2.col2 <= 400 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 100 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | + | id | + | "220" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 200 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 AND lookup_tag_2.col2 == 200 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 > 100.5 - YIELD lookup_tag_2.col3 AS col3 + YIELD id(vertex) as id, lookup_tag_2.col3 AS col3 """ Then the result should be, in any order: - | VertexID | col3 | - | "221" | 200.5 | - | "222" | 300.5 | - | "223" | 400.5 | - | "224" | 500.5 | - | "225" | 600.5 | + | id | col3 | + | "221" | 200.5 | + | "222" | 300.5 | + | "223" | 400.5 | + | "224" | 500.5 | + | "225" | 600.5 | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.5 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | + | id | + | "220" | 
When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.1 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 == 100.1 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col3 >= 100.5 AND lookup_tag_2.col3 <= 300.5 - YIELD lookup_tag_2.col3 AS col3 + YIELD id(vertex) as id, lookup_tag_2.col3 AS col3 """ Then the result should be, in any order: - | VertexID | col3 | - | "220" | 100.5 | - | "221" | 200.5 | - | "222" | 300.5 | + | id | col3 | + | "220" | 100.5 | + | "221" | 200.5 | + | "222" | 300.5 | Then drop the used space Scenario: LookupTest EdgeConditionScan @@ -251,90 +251,90 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 OR lookup_edge_2.col2 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 == 100 OR lookup_edge_2.col2 == 200 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | - | "220" | "222" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | + | "220" | "222" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 > 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 > 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "222" | 0 | - | "220" | "223" | 0 | - | "220" | "224" | 0 | - | "220" | "225" | 0 | + | src | dst | rank | + 
| "220" | "222" | 0 | + | "220" | "223" | 0 | + | "220" | "224" | 0 | + | "220" | "225" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 != 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 != 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "222" | 0 | - | "220" | "223" | 0 | - | "220" | "224" | 0 | - | "220" | "225" | 0 | + | src | dst | rank | + | "220" | "222" | 0 | + | "220" | "223" | 0 | + | "220" | "224" | 0 | + | "220" | "225" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | - | "220" | "222" | 0 | - | "220" | "223" | 0 | - | "220" | "224" | 0 | - | "220" | "225" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | + | "220" | "222" | 0 | + | "220" | "223" | 0 | + | "220" | "224" | 0 | + | "220" | "225" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 == true + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 == true YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | - | "220" | "222" | 0 | - | "220" | "223" | 0 | - | "220" | "224" | 0 | - | "220" | "225" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | + | "220" | "222" | 0 | + | "220" | "223" | 0 | + | "220" | "224" | 0 | + | "220" | "225" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 != true + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col4 != true YIELD src(edge) as src, dst(edge) as dst, rank(edge) as 
rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col2 <= 400 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col2 >= 100 AND lookup_edge_2.col2 <= 400 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | - | "220" | "222" | 0 | - | "220" | "223" | 0 | - | "220" | "224" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | + | "220" | "222" | 0 | + | "220" | "223" | 0 | + | "220" | "224" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 100 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 100 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 200 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 AND lookup_edge_2.col2 == 200 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ LOOKUP ON lookup_edge_2 @@ -342,35 +342,35 @@ Feature: LookUpTest_Vid_String YIELD lookup_edge_2.col3 AS col3 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | col3 | - | "220" | "222" | 0 | 200.5 | - | "220" | "223" | 0 | 300.5 | - | "220" | "224" | 0 | 400.5 | - | "220" | "225" | 0 | 500.5 | + | col3 | + | 200.5 | + | 300.5 | + | 400.5 | + | 500.5 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.5 
YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "220" | "221" | 0 | + | src | dst | rank | + | "220" | "221" | 0 | When executing query: """ - LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.1 + LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 == 100.1 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ LOOKUP ON lookup_edge_2 WHERE lookup_edge_2.col3 >= 100.5 AND lookup_edge_2.col3 <= 300.5 - YIELD lookup_edge_2.col3 AS col3 + YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank, lookup_edge_2.col3 AS col3 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | col3 | - | "220" | "221" | 0 | 100.5 | - | "220" | "222" | 0 | 200.5 | - | "220" | "223" | 0 | 300.5 | + | src | dst | rank | col3 | + | "220" | "221" | 0 | 100.5 | + | "220" | "222" | 0 | 200.5 | + | "220" | "223" | 0 | 300.5 | Then drop the used space Scenario: LookupTest FunctionExprTest @@ -397,85 +397,85 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_2 WHERE 1 == 1 + LOOKUP ON lookup_tag_2 WHERE 1 == 1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE 1 != 1 + LOOKUP ON lookup_tag_2 WHERE 1 != 1 YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE udf_is_in(lookup_tag_2.col2, 100, 200) + LOOKUP ON lookup_tag_2 WHERE udf_is_in(lookup_tag_2.col2, 100, 200) YIELD vertex as node """ Then a SemanticError should be raised at runtime: When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > abs(-5) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > abs(-5) YIELD id(vertex) as id """ Then the 
result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < abs(-5) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < abs(-5) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > (1 + 2) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 > (1 + 2) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < (1 + 2) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 < (1 + 2) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 != (true AND true) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 != (true AND true) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true AND true) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true AND true) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - | "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true or false) + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col4 == (true or false) YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "220" | - | "221" | - | "222" | - | "223" | - 
| "224" | - | "225" | + | id | + | "220" | + | "221" | + | "222" | + | "223" | + | "224" | + | "225" | # FIXME(aiee): should support later by folding constants # When executing query: # """ @@ -485,7 +485,7 @@ Feature: LookUpTest_Vid_String # | VertexID | When executing query: """ - LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != lookup_tag_2.col3 + LOOKUP ON lookup_tag_2 WHERE lookup_tag_2.col2 != lookup_tag_2.col3 YIELD id(vertex) as id """ Then a SemanticError should be raised at runtime: # FIXME(aiee): should support later @@ -533,11 +533,11 @@ Feature: LookUpTest_Vid_String Then a SemanticError should be raised at runtime. When executing query: """ - LOOKUP ON student WHERE student.number == 1 YIELD student.age + LOOKUP ON student WHERE student.number == 1 YIELD id(vertex) as name, student.age """ Then the result should be, in any order: - | VertexID | student.age | - | "220" | 20 | + | name | student.age | + | "220" | 20 | Then drop the used space Scenario: LookupTest OptimizerTest @@ -568,52 +568,52 @@ Feature: LookUpTest_Vid_String And wait 6 seconds When executing query: """ - LOOKUP ON t1 WHERE t1.c1 == 1 + LOOKUP ON t1 WHERE t1.c1 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c1 == 1 AND t1.c2 > 1 + LOOKUP ON t1 WHERE t1.c1 == 1 AND t1.c2 > 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c1 > 1 AND t1.c2 == 1 + LOOKUP ON t1 WHERE t1.c1 > 1 AND t1.c2 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c1 > 1 AND t1.c2 == 1 AND t1.c3 == 1 + LOOKUP ON t1 WHERE t1.c1 > 1 AND t1.c2 == 1 AND t1.c3 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c3 > 1 + LOOKUP ON t1 WHERE t1.c3 > 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP 
ON t1 WHERE t1.c3 > 1 AND t1.c1 >1 + LOOKUP ON t1 WHERE t1.c3 > 1 AND t1.c1 >1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c4 > 1 + LOOKUP ON t1 WHERE t1.c4 > 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c2 > 1 AND t1.c3 > 1 + LOOKUP ON t1 WHERE t1.c2 > 1 AND t1.c3 > 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c2 > 1 AND t1.c1 != 1 + LOOKUP ON t1 WHERE t1.c2 > 1 AND t1.c1 != 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1 WHERE t1.c2 != 1 + LOOKUP ON t1 WHERE t1.c2 != 1 YIELD vertex as node """ Then the execution should be successful Then drop the used space @@ -646,42 +646,42 @@ Feature: LookUpTest_Vid_String And wait 6 seconds When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c1 == 1 + LOOKUP ON t1_str WHERE t1_str.c1 == 1 YIELD t1_str.c1 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c1 == 1 AND t1_str.c2 >1 + LOOKUP ON t1_str WHERE t1_str.c1 == 1 AND t1_str.c2 >1 YIELD t1_str.c2 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c3 == "a" + LOOKUP ON t1_str WHERE t1_str.c3 == "a" YIELD t1_str.c4 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c4 == "a" + LOOKUP ON t1_str WHERE t1_str.c4 == "a" YIELD t1_str.c1 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c4 == "a" + LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c4 == "a" YIELD t1_str.c1 """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c1 == 1 + LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c1 == 1 YIELD vertex as node 
""" Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c2 == 1 AND t1_str.c1 == 1 + LOOKUP ON t1_str WHERE t1_str.c3 == "a" AND t1_str.c2 == 1 AND t1_str.c1 == 1 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON t1_str WHERE t1_str.c4 == "a" AND t1_str.c3 == "a" AND t1_str.c2 == 1 AND t1_str.c1 == 1 + LOOKUP ON t1_str WHERE t1_str.c4 == "a" AND t1_str.c3 == "a" AND t1_str.c2 == 1 AND t1_str.c1 == 1 YIELD vertex as node """ Then the execution should be successful Then drop the used space @@ -719,46 +719,46 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "1" | + | id | + | "1" | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 AND tag_with_str.c2 == "ccc" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 AND tag_with_str.c2 == "ccc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 AND tag_with_str.c2 == "c1_row1" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 1 AND tag_with_str.c2 == "c1_row1" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "1" | + | id | + | "1" | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 AND tag_with_str.c2 == "ab" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 AND tag_with_str.c2 == "ab" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "5" | + | id | + | "5" | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c2 == "abc" AND tag_with_str.c3 == "abc" + LOOKUP ON tag_with_str WHERE tag_with_str.c2 == 
"abc" AND tag_with_str.c3 == "abc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "3" | - | "4" | + | id | + | "3" | + | "4" | When executing query: """ - LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 AND tag_with_str.c2 == "abca" AND tag_with_str.c3 == "bc" + LOOKUP ON tag_with_str WHERE tag_with_str.c1 == 5 AND tag_with_str.c2 == "abca" AND tag_with_str.c3 == "bc" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | "6" | + | id | + | "6" | Then drop the used space Scenario: LookupTest ConditionTest @@ -787,10 +787,12 @@ Feature: LookUpTest_Vid_String identity.NATION == "汉族" AND identity.BIRTHDAY > 19620101 AND identity.BIRTHDAY < 20021231 AND - identity.BIRTHPLACE_CITY == "bbb"; + identity.BIRTHPLACE_CITY == "bbb" + YIELD + id(vertex) as id; """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario: LookupTest from pytest @@ -852,109 +854,109 @@ Feature: LookUpTest_Vid_String """ When executing query: """ - LOOKUP ON serve where serve.start_year > 0 + LOOKUP ON serve where serve.start_year > 0 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | '100' | '200' | 0 | - | '101' | '201' | 0 | - | '102' | '202' | 0 | - | '103' | '203' | 0 | - | '105' | '204' | 0 | - | '121' | '201' | 0 | + | src | dst | rank | + | '100' | '200' | 0 | + | '101' | '201' | 0 | + | '102' | '202' | 0 | + | '103' | '203' | 0 | + | '105' | '204' | 0 | + | '121' | '201' | 0 | When executing query: """ - LOOKUP ON serve where serve.start_year > 1997 and serve.end_year < 2020 + LOOKUP ON serve where serve.start_year > 1997 and serve.end_year < 2020 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | '101' | '201' | 0 | - | '103' | '203' | 0 | - | '121' | '201' | 0 | + | src | dst | rank | + | 
'101' | '201' | 0 | + | '103' | '203' | 0 | + | '121' | '201' | 0 | When executing query: """ - LOOKUP ON serve where serve.start_year > 2000 and serve.end_year < 2020 + LOOKUP ON serve where serve.start_year > 2000 and serve.end_year < 2020 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ - LOOKUP ON like where like.likeness > 89 + LOOKUP ON like where like.likeness > 89 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | '100' | '101' | 0 | - | '101' | '102' | 0 | - | '105' | '106' | 0 | + | src | dst | rank | + | '100' | '101' | 0 | + | '101' | '102' | 0 | + | '105' | '106' | 0 | When executing query: """ - LOOKUP ON like where like.likeness < 39 + LOOKUP ON like where like.likeness < 39 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ - LOOKUP ON player where player.age == 35 + LOOKUP ON player where player.age == 35 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '103' | + | id | + | '103' | When executing query: """ - LOOKUP ON player where player.age > 0 + LOOKUP ON player where player.age > 0 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '100' | - | '101' | - | '102' | - | '103' | - | '104' | - | '105' | - | '106' | - | '121' | + | id | + | '100' | + | '101' | + | '102' | + | '103' | + | '104' | + | '105' | + | '106' | + | '121' | When executing query: """ - LOOKUP ON player where player.age < 100 + LOOKUP ON player where player.age < 100 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '100' | - | '101' | - | '102' | - | '103' | - | '104' | - | '105' | - | '106' | - | '121' | + | 
id | + | '100' | + | '101' | + | '102' | + | '103' | + | '104' | + | '105' | + | '106' | + | '121' | When executing query: """ - LOOKUP ON player where player.name == "Useless" + LOOKUP ON player where player.name == "Useless" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '121' | + | id | + | '121' | When executing query: """ - LOOKUP ON player where player.name == "Useless" and player.age < 30 + LOOKUP ON player where player.name == "Useless" and player.age < 30 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '121' | + | id | + | '121' | When executing query: """ - LOOKUP ON team where team.name == "Warriors" + LOOKUP ON team where team.name == "Warriors" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '200' | + | id | + | '200' | When executing query: """ - LOOKUP ON team where team.name == "oopp" + LOOKUP ON team where team.name == "oopp" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '202' | + | id | + | '202' | Then drop the used space Scenario: LookupTest no index to use at runtime @@ -970,7 +972,7 @@ Feature: LookUpTest_Vid_String Then the execution should be successful When executing query: """ - LOOKUP ON player WHERE player.name == 'Tim' + LOOKUP ON player WHERE player.name == 'Tim' YIELD vertex as node """ Then an ExecutionError should be raised at runtime: There is no index to use at runtime Then drop the used space diff --git a/tests/tck/features/lookup/LookUpLimit.feature b/tests/tck/features/lookup/LookUpLimit.feature index e9461ef699a..d66456c4408 100644 --- a/tests/tck/features/lookup/LookUpLimit.feature +++ b/tests/tck/features/lookup/LookUpLimit.feature @@ -9,10 +9,10 @@ Feature: Push Limit down IndexScan Rule Scenario: push limit down to IndexScan When profiling query: """ - LOOKUP ON player | Limit 2 | ORDER BY $-.VertexID + LOOKUP ON player YIELD id(vertex) as id | Limit 2 | ORDER BY 
$-.id """ Then the result should be, in any order: - | VertexID | + | id | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | And the execution plan should be: @@ -25,12 +25,12 @@ Feature: Push Limit down IndexScan Rule | 0 | Start | | | When profiling query: """ - LOOKUP ON like | Limit 2 | ORDER BY $-.SrcVID + LOOKUP ON like YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank | Limit 2 | ORDER BY $-.src """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | src | dst | rank | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | And the execution plan should be: | id | name | dependencies | operator info | | 4 | DataCollect | 5 | | @@ -41,10 +41,10 @@ Feature: Push Limit down IndexScan Rule | 0 | Start | | | When profiling query: """ - LOOKUP ON player WHERE player.age == 33 | Limit 2 | ORDER BY $-.VertexID + LOOKUP ON player WHERE player.age == 33 YIELD id(vertex) as id | Limit 2 | ORDER BY $-.id """ Then the result should be, in any order: - | VertexID | + | id | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | And the execution plan should be: @@ -57,12 +57,12 @@ Feature: Push Limit down IndexScan Rule | 0 | Start | | | When profiling query: """ - LOOKUP ON like WHERE like.likeness == 90 | Limit 2 | ORDER BY $-.SrcVID + LOOKUP ON like WHERE like.likeness == 90 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank | Limit 2 | ORDER BY $-.src """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | src | dst | rank | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | And the execution plan should be: | id | name | dependencies | operator info | | 4 | DataCollect | 5 | | @@ -75,10 +75,10 @@ Feature: Push Limit down IndexScan Rule Scenario: push limit down to IndexScan with limit When 
profiling query: """ - LOOKUP ON player | LIMIT 3 | ORDER BY $-.VertexID + LOOKUP ON player YIELD id(vertex) as id | LIMIT 3 | ORDER BY $-.id """ Then the result should be, in any order: - | VertexID | + | id | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | @@ -92,13 +92,13 @@ Feature: Push Limit down IndexScan Rule | 9 | Start | | | When profiling query: """ - LOOKUP ON like | LIMIT 3 | ORDER BY $-.SrcVID + LOOKUP ON like YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank | LIMIT 3 | ORDER BY $-.src """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | src | dst | rank | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | And the execution plan should be: | id | name | dependencies | operator info | | 3 | DataCollect | 4 | | @@ -109,10 +109,10 @@ Feature: Push Limit down IndexScan Rule | 9 | Start | | | When profiling query: """ - LOOKUP ON player WHERE player.age == 33 | LIMIT 3 | ORDER BY $-.VertexID + LOOKUP ON player WHERE player.age == 33 YIELD id(vertex) as id | LIMIT 3 | ORDER BY $-.id """ Then the result should be, in any order: - | VertexID | + | id | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | | /[a-zA-Z ']+/ | @@ -126,13 +126,13 @@ Feature: Push Limit down IndexScan Rule | 9 | Start | | | When profiling query: """ - LOOKUP ON like WHERE like.likeness == 90 | LIMIT 3 | ORDER BY $-.SrcVID + LOOKUP ON like WHERE like.likeness == 90 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank | LIMIT 3 | ORDER BY $-.src """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | - | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | src | dst | rank | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | 
/[a-zA-Z ']+/ | /\d+/ | + | /[a-zA-Z ']+/ | /[a-zA-Z ']+/ | /\d+/ | And the execution plan should be: | id | name | dependencies | operator info | | 3 | DataCollect | 4 | | diff --git a/tests/tck/features/lookup/LookupEdge.feature b/tests/tck/features/lookup/LookupEdge.feature index b4cac6c2e86..f4091c103a6 100644 --- a/tests/tck/features/lookup/LookupEdge.feature +++ b/tests/tck/features/lookup/LookupEdge.feature @@ -54,10 +54,12 @@ Feature: Test lookup on edge index lookup_edge_1 WHERE + YIELD + src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | '200' | '201' | 0 | + | src | dst | rank | + | '200' | '201' | 0 | When executing query: """ LOOKUP ON @@ -70,8 +72,8 @@ Feature: Test lookup on edge index lookup_edge_1.col3 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | col1 | col2 | lookup_edge_1.col3 | - | '200' | '201' | 0 | 201 | 201 | 201 | + | col1 | col2 | lookup_edge_1.col3 | + | 201 | 201 | 201 | Then drop the used space Scenario Outline: [edge] different condition and yield test for int vid @@ -101,10 +103,12 @@ Feature: Test lookup on edge index lookup_edge_1 WHERE + YIELD + src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | 200 | 201 | 0 | + | src | dst | rank | + | 200 | 201 | 0 | When executing query: """ LOOKUP ON @@ -117,8 +121,8 @@ Feature: Test lookup on edge index lookup_edge_1.col3 """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | col1 | col2 | lookup_edge_1.col3 | - | 200 | 201 | 0 | 201 | 201 | 201 | + | col1 | col2 | lookup_edge_1.col3 | + | 201 | 201 | 201 | Then drop the used space # TODO(yee): Test bool expression diff --git a/tests/tck/features/lookup/LookupEdge2.feature b/tests/tck/features/lookup/LookupEdge2.feature index aeac34348f3..4e57deaf55d 100644 --- a/tests/tck/features/lookup/LookupEdge2.feature +++ 
b/tests/tck/features/lookup/LookupEdge2.feature @@ -29,29 +29,29 @@ Feature: Test lookup on edge index 2 Scenario Outline: [edge] Simple test cases When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 OR lookup_edge_1.col2 == 201 AND lookup_edge_1.col3 == 202 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 OR lookup_edge_1.col2 == 201 AND lookup_edge_1.col3 == 202 YIELD edge as e """ Then the execution should be successful When executing query: """ - LOOKUP ON lookup_edge_1 WHERE col1 == 201 + LOOKUP ON lookup_edge_1 WHERE col1 == 201 YIELD edge as e """ Then a SemanticError should be raised at runtime: Expression (col1==201) not supported yet When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 OR lookup_edge_1.col5 == 201 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 OR lookup_edge_1.col5 == 201 YIELD edge as e """ Then a SemanticError should be raised at runtime: Invalid column: col5 When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 300 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 300 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | When executing query: """ - LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 AND lookup_edge_1.col2 > 200 AND lookup_edge_1.col1 > 201 + LOOKUP ON lookup_edge_1 WHERE lookup_edge_1.col1 == 201 AND lookup_edge_1.col2 > 200 AND lookup_edge_1.col1 > 201 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | + | src | dst | rank | Then drop the used space diff --git a/tests/tck/features/lookup/LookupTag.feature b/tests/tck/features/lookup/LookupTag.feature index c499a77b808..001373e878e 100644 --- a/tests/tck/features/lookup/LookupTag.feature +++ b/tests/tck/features/lookup/LookupTag.feature @@ -54,10 +54,12 @@ 
Feature: Test lookup on tag index lookup_tag_1 WHERE + YIELD + id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | '201' | + | id | + | '201' | When executing query: """ LOOKUP ON @@ -70,8 +72,8 @@ Feature: Test lookup on tag index lookup_tag_1.col3 """ Then the result should be, in any order: - | VertexID | lookup_tag_1.col1 | lookup_tag_1.col2 | lookup_tag_1.col3 | - | '201' | 201 | 201 | 201 | + | lookup_tag_1.col1 | lookup_tag_1.col2 | lookup_tag_1.col3 | + | 201 | 201 | 201 | Then drop the used space Scenario Outline: [tag] different condition and yield test for int vid @@ -102,10 +104,12 @@ Feature: Test lookup on tag index lookup_tag_1 WHERE + YIELD + id(vertex) as id """ Then the result should be, in any order: - | VertexID | - | 201 | + | id | + | 201 | When executing query: """ LOOKUP ON @@ -118,8 +122,8 @@ Feature: Test lookup on tag index lookup_tag_1.col3 """ Then the result should be, in any order: - | VertexID | lookup_tag_1.col1 | lookup_tag_1.col2 | lookup_tag_1.col3 | - | 201 | 201 | 201 | 201 | + | lookup_tag_1.col1 | lookup_tag_1.col2 | lookup_tag_1.col3 | + | 201 | 201 | 201 | Then drop the used space # TODO(yee): Test bool expression diff --git a/tests/tck/features/lookup/LookupTag2.feature b/tests/tck/features/lookup/LookupTag2.feature index 50616a8a4ef..1ed10b7d328 100644 --- a/tests/tck/features/lookup/LookupTag2.feature +++ b/tests/tck/features/lookup/LookupTag2.feature @@ -30,31 +30,31 @@ Feature: Test lookup on tag index 2 Scenario Outline: [tag] simple tag test cases When executing query: """ - LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 201 OR lookup_tag_1.col2 == 201 AND lookup_tag_1.col3 == 202 + LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 201 OR lookup_tag_1.col2 == 201 AND lookup_tag_1.col3 == 202 YIELD vertex as node """ Then the execution should be successful When executing query: """ - LOOKUP ON lookup_tag_1 WHERE col1 == 200; + LOOKUP ON lookup_tag_1 WHERE col1 == 200 YIELD vertex as 
node; """ Then a SemanticError should be raised at runtime: Expression (col1==200) not supported yet When executing query: """ - LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 200 OR lookup_tag_1.col5 == 20; + LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 200 OR lookup_tag_1.col5 == 20 YIELD vertex as node; """ Then a SemanticError should be raised at runtime: Invalid column: col5 When executing query: """ - LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 300 + LOOKUP ON lookup_tag_1 WHERE lookup_tag_1.col1 == 300 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - lookup on lookup_tag_1 WHERE lookup_tag_1.col1 == 201 AND lookup_tag_1.col2 > 200 AND lookup_tag_1.col1 > 201 + lookup on lookup_tag_1 WHERE lookup_tag_1.col1 == 201 AND lookup_tag_1.col2 > 200 AND lookup_tag_1.col1 > 201 YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | Then drop the used space Scenario Outline: [tag] scan without hints @@ -64,9 +64,11 @@ Feature: Test lookup on tag index 2 lookup_tag_1 WHERE lookup_tag_1.col1 != 200 + YIELD + id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | | | | When executing query: @@ -80,9 +82,9 @@ Feature: Test lookup on tag index 2 lookup_tag_1.col3 """ Then the result should be, in any order: - | VertexID | col1 | lookup_tag_1.col3 | - | | 201 | 201 | - | | 202 | 202 | + | col1 | lookup_tag_1.col3 | + | 201 | 201 | + | 202 | 202 | Then drop the used space # TODO(yee): Test bool expression diff --git a/tests/tck/features/lookup/Output.feature b/tests/tck/features/lookup/Output.feature index 27e64f0a7c0..3492f20c2b6 100644 --- a/tests/tck/features/lookup/Output.feature +++ b/tests/tck/features/lookup/Output.feature @@ -6,8 +6,8 @@ Feature: Lookup with output Scenario: [1] tag output When executing query: """ - LOOKUP ON player WHERE player.age == 40 | - FETCH PROP ON player $-.VertexID YIELD player.name + 
LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as id | + FETCH PROP ON player $-.id YIELD player.name """ Then the result should be, in any order: | player.name | @@ -28,8 +28,8 @@ Feature: Lookup with output Scenario: [1] tag output by var When executing query: """ - $a = LOOKUP ON player WHERE player.age == 40; - FETCH PROP ON player $a.VertexID YIELD player.name + $a = LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as id; + FETCH PROP ON player $a.id YIELD player.name """ Then the result should be, in any order: | player.name | @@ -51,8 +51,8 @@ Feature: Lookup with output When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year | - FETCH PROP ON serve $-.SrcVID->$-.DstVID YIELD serve.start_year + YIELD serve.start_year, src(edge) as src, dst(edge) as dst | + FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year """ Then the result should be, in any order: | serve.start_year | @@ -63,8 +63,8 @@ Feature: Lookup with output When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year AS startYear | - FETCH PROP ON serve $-.SrcVID->$-.DstVID YIELD serve.start_year AS startYear + YIELD serve.start_year AS startYear, src(edge) as src, dst(edge) as dst | + FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year AS startYear """ Then the result should be, in any order: | startYear | @@ -75,8 +75,8 @@ Feature: Lookup with output When executing query: """ $a = LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year; - FETCH PROP ON serve $a.SrcVID->$a.DstVID YIELD serve.start_year + YIELD serve.start_year, src(edge) as src, dst(edge) as dst; + FETCH PROP ON serve $a.src->$a.dst YIELD serve.start_year """ Then the result should be, in any order: | serve.start_year | @@ -87,8 +87,8 @@ Feature: Lookup with output When executing query: """ $a = LOOKUP ON serve WHERE 
serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year AS startYear; - FETCH PROP ON serve $a.SrcVID->$a.DstVID YIELD serve.start_year AS startYear + YIELD serve.start_year AS startYear, src(edge) as src, dst(edge) as dst; + FETCH PROP ON serve $a.src->$a.dst YIELD serve.start_year AS startYear """ Then the result should be, in any order: | startYear | diff --git a/tests/tck/features/lookup/Output.intVid.feature b/tests/tck/features/lookup/Output.intVid.feature index d177623e6df..578f9c5bcfa 100644 --- a/tests/tck/features/lookup/Output.intVid.feature +++ b/tests/tck/features/lookup/Output.intVid.feature @@ -6,8 +6,8 @@ Feature: Lookup with output in integer vid Scenario: [1] tag output When executing query: """ - LOOKUP ON player WHERE player.age == 40 | - FETCH PROP ON player $-.VertexID YIELD player.name + LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as id | + FETCH PROP ON player $-.id YIELD player.name """ Then the result should be, in any order: | player.name | @@ -17,8 +17,8 @@ Feature: Lookup with output in integer vid Scenario: [1] tag output with yield rename When executing query: """ - LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name | - FETCH PROP ON player $-.VertexID YIELD player.name AS name + LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name, id(vertex) as id | + FETCH PROP ON player $-.id YIELD player.name AS name """ Then the result should be, in any order: | name | @@ -28,8 +28,8 @@ Feature: Lookup with output in integer vid Scenario: [1] tag output by var When executing query: """ - $a = LOOKUP ON player WHERE player.age == 40; - FETCH PROP ON player $a.VertexID YIELD player.name + $a = LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as id; + FETCH PROP ON player $a.id YIELD player.name """ Then the result should be, in any order: | player.name | @@ -39,8 +39,8 @@ Feature: Lookup with output in integer vid Scenario: [1] tag output with yield rename by var When executing 
query: """ - $a = LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name; - FETCH PROP ON player $a.VertexID YIELD player.name AS name + $a = LOOKUP ON player WHERE player.age == 40 YIELD id(vertex) as id, player.name AS name; + FETCH PROP ON player $a.id YIELD player.name AS name """ Then the result should be, in any order: | name | @@ -51,8 +51,8 @@ Feature: Lookup with output in integer vid When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year | - FETCH PROP ON serve $-.SrcVID->$-.DstVID YIELD serve.start_year + YIELD src(edge) as src, dst(edge) as dst, serve.start_year | + FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year """ Then the result should be, in any order: | serve.start_year | @@ -63,8 +63,8 @@ Feature: Lookup with output in integer vid When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year AS startYear | - FETCH PROP ON serve $-.SrcVID->$-.DstVID YIELD serve.start_year AS startYear + YIELD serve.start_year AS startYear, src(edge) as src, dst(edge) as dst | + FETCH PROP ON serve $-.src->$-.dst YIELD serve.start_year AS startYear """ Then the result should be, in any order: | startYear | @@ -75,8 +75,8 @@ Feature: Lookup with output in integer vid When executing query: """ $a = LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year; - FETCH PROP ON serve $a.SrcVID->$a.DstVID YIELD serve.start_year + YIELD serve.start_year, src(edge) as src, dst(edge) as dst; + FETCH PROP ON serve $a.src->$a.dst YIELD serve.start_year """ Then the result should be, in any order: | serve.start_year | @@ -87,8 +87,8 @@ Feature: Lookup with output in integer vid When executing query: """ $a = LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 - YIELD serve.start_year AS startYear; - FETCH PROP ON serve $a.SrcVID->$a.DstVID YIELD serve.start_year AS 
startYear + YIELD serve.start_year AS startYear, src(edge) as src, dst(edge) as dst; + FETCH PROP ON serve $a.src->$a.dst YIELD serve.start_year AS startYear """ Then the result should be, in any order: | startYear | diff --git a/tests/tck/features/lookup/TagIndexFullScan.feature b/tests/tck/features/lookup/TagIndexFullScan.feature index 09169ed8d8b..71021b65c95 100644 --- a/tests/tck/features/lookup/TagIndexFullScan.feature +++ b/tests/tck/features/lookup/TagIndexFullScan.feature @@ -6,17 +6,17 @@ Feature: Lookup tag index full scan Scenario: Tag with relational RegExp filter[1] When executing query: """ - LOOKUP ON team where team.name =~ "\\d+\\w+" + LOOKUP ON team where team.name =~ "\\d+\\w+" YIELD vertex as node """ Then a SemanticError should be raised at runtime: Expression (team.name=~"\d+\w+") is not supported, please use full-text index as an optimal solution Scenario: Tag with relational NE filter When profiling query: """ - LOOKUP ON team WHERE team.name != "Hornets" + LOOKUP ON team WHERE team.name != "Hornets" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "76ers" | | "Bucks" | | "Bulls" | @@ -57,10 +57,10 @@ Feature: Lookup tag index full scan Scenario: Tag with simple relational IN filter When profiling query: """ - LOOKUP ON team WHERE team.name IN ["Hornets", "Jazz"] + LOOKUP ON team WHERE team.name IN ["Hornets", "Jazz"] YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Jazz" | | "Hornets" | And the execution plan should be: @@ -70,16 +70,16 @@ Feature: Lookup tag index full scan | 0 | Start | | | When executing query: """ - LOOKUP ON team WHERE team.name IN ["non-existed-name"] + LOOKUP ON team WHERE team.name IN ["non-existed-name"] YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When profiling query: """ - LOOKUP ON player WHERE player.age IN [40 - 1 , 72/2] YIELD player.age + LOOKUP ON player WHERE 
player.age IN [40 - 1 , 72/2] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Amar'e Stoudemire" | 36 | | "Tracy McGrady" | 39 | | "Tony Parker" | 36 | @@ -92,10 +92,10 @@ Feature: Lookup tag index full scan # (a IN b) OR c When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] OR player.name == "ABC" YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] OR player.name == "ABC" YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Dirk Nowitzki" | 40 | | "Joel Embiid" | 25 | | "Kobe Bryant" | 40 | @@ -108,10 +108,10 @@ Feature: Lookup tag index full scan # (a IN b) OR (c IN d) When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] OR player.name IN ["Kobe Bryant"] YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] OR player.name IN ["Kobe Bryant"] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Dirk Nowitzki" | 40 | | "Joel Embiid" | 25 | | "Kobe Bryant" | 40 | @@ -124,10 +124,10 @@ Feature: Lookup tag index full scan # (a IN b) AND c When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] AND player.name == "Kobe Bryant" YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] AND player.name == "Kobe Bryant" YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -136,10 +136,10 @@ Feature: Lookup tag index full scan | 0 | Start | | | When profiling query: """ - LOOKUP ON player WHERE player.name IN ["Kobe Bryant", "Tim Duncan"] AND player.age > 30 + LOOKUP ON player WHERE player.name IN ["Kobe Bryant", "Tim Duncan"] AND player.age > 30 YIELD id(vertex) as 
id """ Then the result should be, in any order: - | VertexID | + | id | | "Kobe Bryant" | | "Tim Duncan" | And the execution plan should be: @@ -150,10 +150,10 @@ Feature: Lookup tag index full scan # c AND (a IN b) When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] AND player.name == "Kobe Bryant" YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] AND player.name == "Kobe Bryant" YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -167,10 +167,10 @@ Feature: Lookup tag index full scan # (a IN b) AND (c IN d) while a, c both have indexes When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -191,10 +191,10 @@ Feature: Lookup tag index full scan Then wait the job to finish When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -217,10 +217,10 @@ Feature: Lookup tag index full scan # since the tag index has been dropped, here a TagIndexFullScan should be performed When profiling query: """ - LOOKUP ON player WHERE player.name IN ["ABC", "Kobe Bryant"] YIELD 
player.age + LOOKUP ON player WHERE player.name IN ["ABC", "Kobe Bryant"] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -230,10 +230,10 @@ Feature: Lookup tag index full scan | 0 | Start | | | When profiling query: """ - LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD player.age + LOOKUP ON player WHERE player.age IN [40, 25] AND player.name IN ["ABC", "Kobe Bryant"] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Kobe Bryant" | 40 | And the execution plan should be: | id | name | dependencies | operator info | @@ -245,10 +245,10 @@ Feature: Lookup tag index full scan Scenario: Tag with relational NOT IN filter When profiling query: """ - LOOKUP ON team WHERE team.name NOT IN ["Hornets", "Jazz"] + LOOKUP ON team WHERE team.name NOT IN ["Hornets", "Jazz"] YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "76ers" | | "Bucks" | | "Bulls" | @@ -285,10 +285,10 @@ Feature: Lookup tag index full scan | 0 | Start | | | When profiling query: """ - LOOKUP ON player WHERE player.age NOT IN [40 - 1 , 72/2] YIELD player.age + LOOKUP ON player WHERE player.age NOT IN [40 - 1 , 72/2] YIELD id(vertex) as id, player.age """ Then the result should be, in any order: - | VertexID | player.age | + | id | player.age | | "Yao Ming" | 38 | | "Aron Baynes" | 32 | | "Ben Simmons" | 22 | @@ -351,22 +351,22 @@ Feature: Lookup tag index full scan Scenario: Tag with relational CONTAINS/NOT CONTAINS filter When executing query: """ - LOOKUP ON team WHERE team.name CONTAINS "ABC" + LOOKUP ON team WHERE team.name CONTAINS "ABC" YIELD vertex as node """ Then a SemanticError should be raised at runtime: Expression (team.name CONTAINS "ABC") 
is not supported, please use full-text index as an optimal solution When executing query: """ - LOOKUP ON team WHERE team.name NOT CONTAINS "ABC" + LOOKUP ON team WHERE team.name NOT CONTAINS "ABC" YIELD vertex as node """ Then a SemanticError should be raised at runtime: Expression (team.name NOT CONTAINS "ABC") is not supported, please use full-text index as an optimal solution Scenario: Tag with relational STARTS WITH filter When profiling query: """ - LOOKUP ON team WHERE team.name STARTS WITH toUpper("t") + LOOKUP ON team WHERE team.name STARTS WITH toUpper("t") YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | | "Trail Blazers" | | "Timberwolves" | | "Thunders" | @@ -378,29 +378,29 @@ Feature: Lookup tag index full scan | 0 | Start | | | When executing query: """ - LOOKUP ON team WHERE team.name STARTS WITH "ABC" + LOOKUP ON team WHERE team.name STARTS WITH "ABC" YIELD id(vertex) as id """ Then the result should be, in any order: - | VertexID | + | id | When executing query: """ - LOOKUP ON team WHERE team.name STARTS WITH 123 + LOOKUP ON team WHERE team.name STARTS WITH 123 YIELD id(vertex) """ Then a SemanticError should be raised at runtime: Column type error : name When profiling query: """ - LOOKUP ON team WHERE team.name NOT STARTS WITH toUpper("t") + LOOKUP ON team WHERE team.name NOT STARTS WITH toUpper("t") YIELD id(vertex) """ Then a SemanticError should be raised at runtime: Expression (team.name NOT STARTS WITH toUpper("t")) is not supported, please use full-text index as an optimal solution Scenario: Tag with relational ENDS/NOT ENDS WITH filter When executing query: """ - LOOKUP ON team WHERE team.name ENDS WITH toLower("S") + LOOKUP ON team WHERE team.name ENDS WITH toLower("S") YIELD id(vertex) """ Then a SemanticError should be raised at runtime: Expression (team.name ENDS WITH toLower("S")) is not supported, please use full-text index as an optimal solution When executing query: """ - LOOKUP ON team 
WHERE team.name NOT ENDS WITH toLower("S") + LOOKUP ON team WHERE team.name NOT ENDS WITH toLower("S") YIELD id(vertex) """ Then a SemanticError should be raised at runtime: Expression (team.name NOT ENDS WITH toLower("S")) is not supported, please use full-text index as an optimal solution diff --git a/tests/tck/features/lookup/WithYield.feature b/tests/tck/features/lookup/WithYield.feature index 3cacd5e1a04..9292d71d3d7 100644 --- a/tests/tck/features/lookup/WithYield.feature +++ b/tests/tck/features/lookup/WithYield.feature @@ -9,25 +9,25 @@ Feature: Lookup with yield LOOKUP ON player WHERE player.age == 40 YIELD player.name """ Then the result should be, in any order: - | VertexID | player.name | - | 'Kobe Bryant' | 'Kobe Bryant' | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | + | player.name | + | 'Kobe Bryant' | + | 'Dirk Nowitzki' | When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name, player.age + 1 """ Then the result should be, in any order: - | VertexID | player.name | (player.age+1) | - | 'Kobe Bryant' | 'Kobe Bryant' | 41 | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | 41 | + | player.name | (player.age+1) | + | 'Kobe Bryant' | 41 | + | 'Dirk Nowitzki' | 41 | When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name, player.age + 1, vertex as node """ Then the result should be, in any order: - | VertexID | player.name | (player.age+1) | node | - | 'Kobe Bryant' | 'Kobe Bryant' | 41 | ("Kobe Bryant" : player {age : 40, name : "Kobe Bryant"}) | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | 41 | ("Dirk Nowitzki" : player {age : 40, name : "Dirk Nowitzki"}) | + | player.name | (player.age+1) | node | + | 'Kobe Bryant' | 41 | ("Kobe Bryant" : player {age : 40, name : "Kobe Bryant"}) | + | 'Dirk Nowitzki' | 41 | ("Dirk Nowitzki" : player {age : 40, name : "Dirk Nowitzki"}) | Scenario: [1] tag with yield rename When executing query: @@ -35,18 +35,18 @@ Feature: Lookup with yield LOOKUP ON player WHERE player.age == 40 
YIELD player.name AS name """ Then the result should be, in any order: - | VertexID | name | - | 'Kobe Bryant' | 'Kobe Bryant' | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | + | name | + | 'Kobe Bryant' | + | 'Dirk Nowitzki' | When executing query: """ LOOKUP ON team WHERE team.name in ["76ers", "Lakers", "Spurs"] YIELD vertex AS node """ Then the result should be, in any order: - | VertexID | node | - | '76ers' | ("76ers" : team {name : "76ers"}) | - | 'Lakers' | ("Lakers" : team {name : "Lakers"}) | - | 'Spurs' | ("Spurs" : team {name : "Spurs"}) | + | node | + | ("76ers" : team {name : "76ers"}) | + | ("Lakers" : team {name : "Lakers"}) | + | ("Spurs" : team {name : "Spurs"}) | Scenario: [2] edge with yield When executing query: @@ -55,18 +55,18 @@ Feature: Lookup with yield YIELD serve.start_year """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | serve.start_year | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | + | serve.start_year | + | 2008 | + | 2008 | When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 YIELD serve.start_year, edge as relationship """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | serve.start_year | relationship | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | [:serve "Russell Westbrook"->"Thunders" @0 {end_year: 2019, start_year: 2008}] | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | [:serve "Marc Gasol"->"Grizzlies" @0 {end_year: 2019, start_year: 2008}] | + | serve.start_year | relationship | + | 2008 | [:serve "Russell Westbrook"->"Thunders" @0 {end_year: 2019, start_year: 2008}] | + | 2008 | [:serve "Marc Gasol"->"Grizzlies" @0 {end_year: 2019, start_year: 2008}] | Scenario: [2] edge with yield rename When executing query: @@ -75,21 +75,21 @@ Feature: Lookup with yield YIELD serve.start_year AS startYear """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | startYear | - | 
'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | + | startYear | + | 2008 | + | 2008 | When executing query: """ LOOKUP ON like WHERE like.likeness < 50 + 1 YIELD like.likeness, edge as relationship """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | like.likeness | relationship | - | "Blake Griffin" | "Chris Paul" | 0 | -1 | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | - | "Dirk Nowitzki" | "Dwyane Wade" | 0 | 10 | [:like "Dirk Nowitzki"->"Dwyane Wade" @0 {likeness: 10}] | - | "Kyrie Irving" | "LeBron James" | 0 | 13 | [:like "Kyrie Irving"->"LeBron James" @0 {likeness: 13}] | - | "Marco Belinelli" | "Tony Parker" | 0 | 50 | [:like "Marco Belinelli"->"Tony Parker" @0 {likeness: 50}] | - | "Rajon Rondo" | "Ray Allen" | 0 | -1 | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | - | "Ray Allen" | "Rajon Rondo" | 0 | 9 | [:like "Ray Allen"->"Rajon Rondo" @0 {likeness: 9}] | + | like.likeness | relationship | + | -1 | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | + | 10 | [:like "Dirk Nowitzki"->"Dwyane Wade" @0 {likeness: 10}] | + | 13 | [:like "Kyrie Irving"->"LeBron James" @0 {likeness: 13}] | + | 50 | [:like "Marco Belinelli"->"Tony Parker" @0 {likeness: 50}] | + | -1 | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | + | 9 | [:like "Ray Allen"->"Rajon Rondo" @0 {likeness: 9}] | When executing query: """ LOOKUP ON like WHERE like.likeness < 50 + 1 YIELD like.likeness, edge as relationship | YIELD count(*) as nums diff --git a/tests/tck/features/lookup/WithYield.intVid.feature b/tests/tck/features/lookup/WithYield.intVid.feature index f685ca643d1..03d005b7719 100644 --- a/tests/tck/features/lookup/WithYield.intVid.feature +++ b/tests/tck/features/lookup/WithYield.intVid.feature @@ -8,45 +8,45 @@ Feature: Lookup with yield in integer vid """ LOOKUP ON player WHERE player.age == 40 YIELD player.name """ - Then the result should be, in any order, and the columns 0 
should be hashed: - | VertexID | player.name | - | 'Kobe Bryant' | 'Kobe Bryant' | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | + Then the result should be, in any order: + | player.name | + | 'Kobe Bryant' | + | 'Dirk Nowitzki' | When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name, player.age + 1 """ - Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | player.name | (player.age+1) | - | 'Kobe Bryant' | 'Kobe Bryant' | 41 | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | 41 | + Then the result should be, in any order: + | player.name | (player.age+1) | + | 'Kobe Bryant' | 41 | + | 'Dirk Nowitzki' | 41 | When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name, player.age + 1, vertex as node """ - Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | player.name | (player.age+1) | node | - | 'Kobe Bryant' | 'Kobe Bryant' | 41 | ("Kobe Bryant" : player {age : 40, name : "Kobe Bryant"}) | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | 41 | ("Dirk Nowitzki" : player {age : 40, name : "Dirk Nowitzki"}) | + Then the result should be, in any order: + | player.name | (player.age+1) | node | + | 'Kobe Bryant' | 41 | ("Kobe Bryant" : player {age : 40, name : "Kobe Bryant"}) | + | 'Dirk Nowitzki' | 41 | ("Dirk Nowitzki" : player {age : 40, name : "Dirk Nowitzki"}) | Scenario: [1] tag with yield rename When executing query: """ LOOKUP ON player WHERE player.age == 40 YIELD player.name AS name """ - Then the result should be, in any order, and the columns 0 should be hashed: - | VertexID | name | - | 'Kobe Bryant' | 'Kobe Bryant' | - | 'Dirk Nowitzki' | 'Dirk Nowitzki' | + Then the result should be, in any order: + | name | + | 'Kobe Bryant' | + | 'Dirk Nowitzki' | When executing query: """ LOOKUP ON team WHERE team.name in ["76ers", "Lakers", "Spurs"] YIELD vertex AS node """ - Then the result should be, in any order, and the columns 0 should be hashed: - | 
VertexID | node | - | '76ers' | ("76ers" : team {name : "76ers"}) | - | 'Lakers' | ("Lakers" : team {name : "Lakers"}) | - | 'Spurs' | ("Spurs" : team {name : "Spurs"}) | + Then the result should be, in any order: + | node | + | ("76ers" : team {name : "76ers"}) | + | ("Lakers" : team {name : "Lakers"}) | + | ("Spurs" : team {name : "Spurs"}) | Scenario: [2] edge with yield When executing query: @@ -54,19 +54,19 @@ Feature: Lookup with yield in integer vid LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 YIELD serve.start_year """ - Then the result should be, in any order, and the columns 0,1 should be hashed: - | SrcVID | DstVID | Ranking | serve.start_year | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | + Then the result should be, in any order: + | serve.start_year | + | 2008 | + | 2008 | When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 YIELD serve.start_year, edge as relationship """ - Then the result should be, in any order, and the columns 0,1 should be hashed: - | SrcVID | DstVID | Ranking | serve.start_year | relationship | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | [:serve "Russell Westbrook"->"Thunders" @0 {end_year: 2019, start_year: 2008}] | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | [:serve "Marc Gasol"->"Grizzlies" @0 {end_year: 2019, start_year: 2008}] | + Then the result should be, in any order: + | serve.start_year | relationship | + | 2008 | [:serve "Russell Westbrook"->"Thunders" @0 {end_year: 2019, start_year: 2008}] | + | 2008 | [:serve "Marc Gasol"->"Grizzlies" @0 {end_year: 2019, start_year: 2008}] | Scenario: [2] edge with yield rename When executing query: @@ -74,22 +74,22 @@ Feature: Lookup with yield in integer vid LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 YIELD serve.start_year AS startYear """ - Then the result should be, in any order, and the columns 0,1 should be hashed: - 
| SrcVID | DstVID | Ranking | startYear | - | 'Russell Westbrook' | 'Thunders' | 0 | 2008 | - | 'Marc Gasol' | 'Grizzlies' | 0 | 2008 | + Then the result should be, in any order: + | startYear | + | 2008 | + | 2008 | When executing query: """ LOOKUP ON like WHERE like.likeness < 50 + 1 YIELD like.likeness, edge as relationship """ - Then the result should be, in any order, and the columns 0,1 should be hashed: - | SrcVID | DstVID | Ranking | like.likeness | relationship | - | "Blake Griffin" | "Chris Paul" | 0 | -1 | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | - | "Dirk Nowitzki" | "Dwyane Wade" | 0 | 10 | [:like "Dirk Nowitzki"->"Dwyane Wade" @0 {likeness: 10}] | - | "Kyrie Irving" | "LeBron James" | 0 | 13 | [:like "Kyrie Irving"->"LeBron James" @0 {likeness: 13}] | - | "Marco Belinelli" | "Tony Parker" | 0 | 50 | [:like "Marco Belinelli"->"Tony Parker" @0 {likeness: 50}] | - | "Rajon Rondo" | "Ray Allen" | 0 | -1 | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | - | "Ray Allen" | "Rajon Rondo" | 0 | 9 | [:like "Ray Allen"->"Rajon Rondo" @0 {likeness: 9}] | + Then the result should be, in any order: + | like.likeness | relationship | + | -1 | [:like "Blake Griffin"->"Chris Paul" @0 {likeness: -1}] | + | 10 | [:like "Dirk Nowitzki"->"Dwyane Wade" @0 {likeness: 10}] | + | 13 | [:like "Kyrie Irving"->"LeBron James" @0 {likeness: 13}] | + | 50 | [:like "Marco Belinelli"->"Tony Parker" @0 {likeness: 50}] | + | -1 | [:like "Rajon Rondo"->"Ray Allen" @0 {likeness: -1}] | + | 9 | [:like "Ray Allen"->"Rajon Rondo" @0 {likeness: 9}] | When executing query: """ LOOKUP ON serve WHERE serve.start_year == 2008 and serve.end_year == 2019 diff --git a/tests/tck/features/optimizer/CollapseProjectRule.feature b/tests/tck/features/optimizer/CollapseProjectRule.feature index 22712c9cb02..4d65cb3686b 100644 --- a/tests/tck/features/optimizer/CollapseProjectRule.feature +++ b/tests/tck/features/optimizer/CollapseProjectRule.feature @@ -36,9 +36,8 @@ Feature: 
Collapse Project Rule | 0 | Start | | | When profiling query: """ - LOOKUP ON player - WHERE player.name=='Tim Duncan' - | YIELD $-.VertexID AS vid + LOOKUP ON player WHERE player.name=='Tim Duncan' YIELD id(vertex) as id + | YIELD $-.id AS vid """ Then the result should be, in any order: | vid | diff --git a/tests/tck/features/optimizer/PushFilterDownLeftJoinRule.feature b/tests/tck/features/optimizer/PushFilterDownLeftJoinRule.feature index fe5b88a84e3..0eb9994526b 100644 --- a/tests/tck/features/optimizer/PushFilterDownLeftJoinRule.feature +++ b/tests/tck/features/optimizer/PushFilterDownLeftJoinRule.feature @@ -9,8 +9,8 @@ Feature: Push Filter down LeftJoin rule Scenario: push filter down LeftJoin When profiling query: """ - LOOKUP ON player WHERE player.name=='Tim Duncan' - | YIELD $-.VertexID AS vid + LOOKUP ON player WHERE player.name=='Tim Duncan' YIELD id(vertex) as id + | YIELD $-.id AS vid | GO FROM $-.vid OVER like BIDIRECT WHERE any(x in split($$.player.name, ' ') WHERE x contains 'Ti') YIELD $$.player.name, like._dst AS vid diff --git a/tests/tck/features/schema/CreateSpaceAs.feature b/tests/tck/features/schema/CreateSpaceAs.feature index 4c3fe5073fb..9398dd86bc7 100644 --- a/tests/tck/features/schema/CreateSpaceAs.feature +++ b/tests/tck/features/schema/CreateSpaceAs.feature @@ -53,11 +53,11 @@ Feature: Create space as another space | ("1" :t1{col1: 1}) | When executing query: """ - lookup on t1 where t1.col1 == 1; + lookup on t1 where t1.col1 == 1 YIELD id(vertex) as id; """ Then the result should be, in any order: - | VertexID | - | "1" | + | id | + | "1" | When executing query: """ fetch prop on e1 "1" -> "2" YIELD edge as e; @@ -67,11 +67,11 @@ Feature: Create space as another space | [:e1 "1"->"2" @0 {col1: 1}] | When executing query: """ - lookup on e1 where e1.col1 == 1; + lookup on e1 where e1.col1 == 1 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank; """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - 
| "1" | "2" | 0 | + | src | dst | rank | + | "1" | "2" | 0 | # clone space When clone a new space according to current space And wait 3 seconds @@ -138,11 +138,11 @@ Feature: Create space as another space | ("1" :t1{col1: 2}) | When executing query: """ - lookup on t1 where t1.col1 == 2; + lookup on t1 where t1.col1 == 2 YIELD id(vertex) as id; """ Then the result should be, in any order: - | VertexID | - | "1" | + | id | + | "1" | When executing query: """ fetch prop on e1 "1" -> "2" YIELD edge as e; @@ -152,9 +152,9 @@ Feature: Create space as another space | [:e1 "1"->"2" @0 {col1: 2}] | When executing query: """ - lookup on e1 where e1.col1 == 2; + lookup on e1 where e1.col1 == 2 YIELD src(edge) as src, dst(edge) as dst, rank(edge) as rank; """ Then the result should be, in any order: - | SrcVID | DstVID | Ranking | - | "1" | "2" | 0 | + | src | dst | rank | + | "1" | "2" | 0 | Then drop the used space diff --git a/tests/tck/features/subgraph/subgraph.IntVid.feature b/tests/tck/features/subgraph/subgraph.IntVid.feature index 5c510feec24..fe051ef5dd8 100644 --- a/tests/tck/features/subgraph/subgraph.IntVid.feature +++ b/tests/tck/features/subgraph/subgraph.IntVid.feature @@ -9,12 +9,12 @@ Feature: Integer Vid subgraph Scenario: Integer Vid invalid input When executing query: """ - GET SUBGRAPH WITH PROP FROM $-.id + GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: `$-.id', not exist prop `id' When executing query: """ - GET SUBGRAPH WITH PROP FROM $a.id + GET SUBGRAPH WITH PROP FROM $a.id YIELD edges as relationships """ Then a SemanticError should be raised at runtime: `$a.id', not exist variable `a' When executing query: @@ -34,881 +34,30 @@ Feature: Integer Vid subgraph Then a SyntaxError should be raised at runtime: please add alias when using `edges'. 
near `edges' When executing query: """ - GO FROM hash("Tim Duncan") OVER like YIELD $$.player.name AS id | GET SUBGRAPH WITH PROP FROM $-.id + GO FROM hash("Tim Duncan") OVER like YIELD $$.player.name AS id | GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as a """ Then a SemanticError should be raised at runtime: `$-.id', the srcs should be type of INT64, but was`STRING' When executing query: """ - $a = GO FROM hash("Tim Duncan") OVER like YIELD $$.player.name AS ID; GET SUBGRAPH WITH PROP FROM $a.ID + $a = GO FROM hash("Tim Duncan") OVER like YIELD $$.player.name AS ID; GET SUBGRAPH WITH PROP FROM $a.ID YIELD edges as b """ Then a SemanticError should be raised at runtime: `$a.ID', the srcs should be type of INT64, but was`STRING' When executing query: """ - $a = GO FROM hash("Tim Duncan") OVER like YIELD like._src AS src; GET SUBGRAPH WITH PROP FROM $b.src + $a = GO FROM hash("Tim Duncan") OVER like YIELD like._src AS src; GET SUBGRAPH WITH PROP FROM $b.src YIELD vertices as a, edges as b """ Then a SemanticError should be raised at runtime: `$b.src', not exist variable `b' When executing query: """ - GO FROM hash("Tim Duncan") OVER like YIELD like._dst AS id, like._src AS id | GET SUBGRAPH WITH PROP FROM $-.id + GO FROM hash("Tim Duncan") OVER like YIELD like._dst AS id, like._src AS id | GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as a """ Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' When executing query: """ - $a = GO FROM hash("Tim Duncan") OVER like YIELD like._dst AS id, like._src AS id; GET SUBGRAPH WITH PROP FROM $a.id + $a = GO FROM hash("Tim Duncan") OVER like YIELD like._dst AS id, like._src AS id; GET SUBGRAPH WITH PROP FROM $a.id YIELD edges as b """ Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' - Scenario: Integer Vid zero step - When executing query: - """ - GET SUBGRAPH WITH PROP 0 STEPS FROM hash("Tim Duncan") - """ - Then the result should be, in any order, with relax 
comparison: - | _vertices | - | [("Tim Duncan")] | - When executing query: - """ - GET SUBGRAPH WITH PROP 0 STEPS FROM hash("Tim Duncan"), hash("Spurs") - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Tim Duncan"), ("Spurs")] | - When executing query: - """ - GET SUBGRAPH WITH PROP 0 STEPS FROM hash("Tim Duncan"), hash("Tony Parker"), hash("Spurs") - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Tim Duncan"), ("Spurs"), ("Tony Parker")] | - When executing query: - """ - GO FROM hash('Tim Duncan') over serve YIELD serve._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Spurs")] | - When executing query: - """ - GO FROM hash('Tim Duncan') over like YIELD like._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Manu Ginobili"), ("Tony Parker")] | - When executing query: - """ - $a = GO FROM hash('Tim Duncan') over serve YIELD serve._dst AS id; GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Spurs")] | - When executing query: - """ - $a = GO FROM hash('Tim Duncan') over like YIELD like._dst AS id; GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id - """ - Then the result should be, in any order, with relax comparison: - | _vertices | - | [("Manu Ginobili"), ("Tony Parker")] | - - Scenario: Integer Vid subgraph - When executing query: - """ - GET SUBGRAPH WITH PROP FROM hash('Tim Duncan') - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "Aron 
Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Aron Baynes"->"Spurs"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Boris Diaw"->"Tony Parker"@0] | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Tony Parker"@0] | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:serve "Tony Parker"->"Spurs"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Dejounte Murray"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - | | | [:like "Dejounte Murray"->"Danny Green"@0] | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | - | | | [:serve "Danny Green"->"Spurs"@0] | - 
Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: Integer Vid two steps - When executing query: - """ - GET SUBGRAPH WITH PROP 2 STEPS FROM hash('Tim Duncan') - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Cavaliers") | [:serve "LeBron James"->"Cavaliers"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Pistons") | [:serve "LeBron James"->"Cavaliers"@1] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"LeBron James"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Danny Green"->"Marco Belinelli"@0] | ("Kings") | [:serve "Rudy Gay"->"Kings"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:serve "Danny Green"->"Cavaliers"@0] | ("Raptors") | [:serve "Cory Joseph"->"Raptors"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:serve "Danny Green"->"Raptors"@0] | ("Jazz") | [:serve "Rudy Gay"->"Raptors"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:serve "Danny Green"->"Spurs"@0] | ("LeBron James") | [:serve "Tracy McGrady"->"Raptors"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Paul Gasol") | [:like "Chris Paul"->"LeBron James"@0] | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("Kyle Anderson") | [:serve "LeBron James"->"Heat"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Rudy Gay") | [:serve "LeBron 
James"->"Lakers"@0] | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Paul Gasol"->"Bulls"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:serve "Manu Ginobili"->"Spurs"@0] | ("Yao Ming") | [:serve "Paul Gasol"->"Lakers"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("James Harden") | [:like "Tracy McGrady"->"Rudy Gay"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:serve "Aron Baynes"->"Celtics"@0] | ("Hornets") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:serve "Aron Baynes"->"Pistons"@0] | ("David West") | [:like "Yao Ming"->"Tracy McGrady"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Aron Baynes"->"Spurs"@0] | ("Chris Paul") | [:like "Russell Westbrook"->"James Harden"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | ("Celtics") | [:like "James Harden"->"Russell Westbrook"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jonathon Simmons") | [:serve "Chris Paul"->"Hornets"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "David West"->"Hornets"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Heat") | [:serve "David West"->"Warriors"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Lakers") | [:serve "Jonathon Simmons"->"76ers"@0] | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Suns") | [:serve "Jonathon Simmons"->"Magic"@0] | - | | | [:like "Yao Ming"->"Shaquille O\'Neal"@0] | ("Magic") | [:serve "JaVale McGee"->"Lakers"@0] | - | | | [:like "Shaquille O\'Neal"->"JaVale McGee"@0] | ("Trail Blazers") | [:serve "Tracy McGrady"->"Magic"@0] | - | | | [:serve "Shaquille O\'Neal"->"Cavaliers"@0] | ("76ers") | [:serve "JaVale McGee"->"Warriors"@0] | - | | | [:serve "Shaquille O\'Neal"->"Celtics"@0] | 
("JaVale McGee") | | - | | | [:serve "Shaquille O\'Neal"->"Heat"@0] | ("Cory Joseph") | | - | | | [:serve "Shaquille O\'Neal"->"Lakers"@0] | ("Tracy McGrady") | | - | | | [:serve "Shaquille O\'Neal"->"Magic"@0] | ("Russell Westbrook") | | - | | | [:serve "Shaquille O\'Neal"->"Suns"@0] | ("Bulls") | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Warriors") | | - | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | [:serve "Tony Parker"->"Hornets"@0] | | | - | | | [:serve "Tony Parker"->"Spurs"@0] | | | - | | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | | | - | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | [:serve "Cory Joseph"->"Spurs"@0] | | | - | | | [:serve "David West"->"Spurs"@0] | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | [:serve "Jonathon Simmons"->"Spurs"@0] | | | - | | | [:serve "Kyle Anderson"->"Spurs"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | [:serve "Paul Gasol"->"Spurs"@0] | | | - | | | [:serve "Rudy Gay"->"Spurs"@0] | | | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | [:serve "Marco 
Belinelli"->"76ers"@0] | | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - - Scenario: Integer Vid in edge - When executing query: - """ - GET SUBGRAPH WITH PROP 2 STEPS FROM hash('Tim Duncan') IN like, serve - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Damian Lillard") | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Danny Green") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("Yao Ming") | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Rudy Gay") | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Marco Belinelli"->"Danny Green"@0] | | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Danny Green"->"Marco Belinelli"@0] | | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | - | [:like 
"Tony Parker"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Tim Duncan"->"Manu Ginobili"@0] | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | - | | | [:like "Boris Diaw"->"Tony Parker"@0] | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | - | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | - | | | [:like "Tim Duncan"->"Tony Parker"@0] | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | [] | - - Scenario: Integer Vid in and out edge - When executing query: - """ - GET SUBGRAPH WITH PROP 2 STEPS FROM hash('Tim Duncan') IN like OUT serve - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | - | [:serve "Tim Duncan"->"Spurs"@0] | ("LaMarcus Aldridge") | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Danny Green") | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Rudy Gay") | [:serve "Rudy Gay"->"Spurs"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Hornets") | [:serve "Rudy Gay"->"Raptors"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("Heat") | [:serve "Rudy Gay"->"Kings"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("76ers") | | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Aron Baynes") | [:serve "Danny Green"->"Cavaliers"@0] | ("Bulls") | | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Manu Ginobili") | [:serve "Danny Green"->"Raptors"@0] | ("Trail Blazers") | | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Danny 
Green"->"Spurs"@0] | ("Celtics") | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Danny Green"@0] | ("Kings") | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Marco Belinelli"->"Danny Green"@0] | ("Hawks") | | - | [:like "Tony Parker"->"Tim Duncan"@0] | ("Spurs") | [:serve "Marco Belinelli"->"76ers"@0] | ("Warriors") | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | ("Cavaliers") | | - | | | [:serve "Marco Belinelli"->"Hawks"@0] | ("Raptors") | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | ("Jazz") | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | ("Pistons") | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | ("Lakers") | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | ("Suns") | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | ("Magic") | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | ("Yao Ming") | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | [:serve "Manu Ginobili"->"Spurs"@0] | | | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | - | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - | | | [:serve "Shaquille 
O'Neal"->"Cavaliers"@0] | | | - | | | [:serve "Shaquille O'Neal"->"Celtics"@0] | | | - | | | [:serve "Shaquille O'Neal"->"Heat"@0] | | | - | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | | | - | | | [:serve "Shaquille O'Neal"->"Magic"@0] | | | - | | | [:serve "Shaquille O'Neal"->"Suns"@0] | | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | - | | | [:serve "Tony Parker"->"Hornets"@0] | | | - | | | [:serve "Tony Parker"->"Spurs"@0] | | | - | | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | [:like "Tim Duncan"->"Tony Parker"@0] | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - - Scenario: Integer Vid two steps in and out edge - When executing query: - """ - GET SUBGRAPH WITH PROP 2 STEPS FROM hash('Tim Duncan'), hash('James Harden') IN teammate OUT serve - """ - Then define some list variables: - | vertex1 | edge1 | vertex2 | edge2 | vertex3 | - | ("Tim Duncan") | [:serve "Tim Duncan"->"Spurs"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Hornets") | - | ("James Harden") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Tony Parker") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | - | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Spurs") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | - | | [:serve "James Harden"->"Rockets"@0] | ("Rockets") | [:serve "Tony Parker"->"Hornets"@0] | | - | | [:serve "James Harden"->"Thunders"@0] | ("Thunders") | [:serve "Tony Parker"->"Spurs"@0] | | - | | | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | - | | | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | <[vertex1]> | <[edge1]> | - | 
<[vertex2]> | <[edge2]> | - | <[vertex3]> | [] | - - Scenario: Integer Vid three steps - When executing query: - """ - GET SUBGRAPH WITH PROP 3 STEPS FROM hash('Paul George') OUT serve BOTH like - """ - Then define some list variables: - | edge1 | edge2 | edge3 | vertex4 | edge4 | - | [:like "Russell Westbrook"->"Paul George"@0] | [:like "Dejounte Murray"->"Russell Westbrook"@0] | [:serve "Dejounte Murray"->"Spurs"@0] | ("Kyle Anderson") | [:like "Tony Parker"->"Tim Duncan"@0] | - | [:serve "Paul George"->"Pacers"@0] | [:like "James Harden"->"Russell Westbrook"@0] | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Tony Parker") | [:serve "Kyle Anderson"->"Spurs"@0] | - | [:serve "Paul George"->"Thunders"@0] | [:serve "Russell Westbrook"->"Thunders"@0] | [:like "Dejounte Murray"->"Danny Green"@0] | ("Danny Green") | [:like "Marco Belinelli"->"Danny Green"@0] | - | [:like "Paul George"->"Russell Westbrook"@0] | [:like "Russell Westbrook"->"James Harden"@0] | [:like "Dejounte Murray"->"James Harden"@0] | ("Luka Doncic") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Tim Duncan") | [:serve "Tony Parker"->"Spurs"@0] | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("Marco Belinelli") | [:serve "Danny Green"->"Spurs"@0] | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | ("Kevin Durant") | [:like "Danny Green"->"LeBron James"@0] | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("Manu Ginobili") | [:like "Danny Green"->"Marco Belinelli"@0] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | ("Chris Paul") | [:like "Danny Green"->"Tim Duncan"@0] | - | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("LeBron James") | [:like "Tim Duncan"->"Tony Parker"@0] | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Spurs") | [:like "Marco Belinelli"->"Tony Parker"@0] | - | | | [:like "Luka Doncic"->"James Harden"@0] | ("Rockets") | [:like "Tim Duncan"->"Manu Ginobili"@0] | - | | | [:serve "James 
Harden"->"Rockets"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | - | | | [:serve "James Harden"->"Thunders"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | - | | | | | [:serve "Marco Belinelli"->"Spurs"@0] | - | | | | | [:serve "Kevin Durant"->"Thunders"@0] | - | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | | | [:serve "Manu Ginobili"->"Spurs"@0] | - | | | | | [:serve "Chris Paul"->"Rockets"@0] | - | | | | | [:like "Chris Paul"->"LeBron James"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Paul George")] | <[edge1]> | - | [("Russell Westbrook"), ("Pacers"), ("Thunders")] | <[edge2]> | - | [("Dejounte Murray"), ("James Harden")] | <[edge3]> | - | <[vertex4]> | <[edge4]> | - - Scenario: Integer Vid bidirect edge - When executing query: - """ - GET SUBGRAPH WITH PROP FROM hash('Tony Parker') BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Manu Ginobili"->"Tim Duncan"@0] | - | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Tim Duncan") | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Tim Duncan"->"Manu Ginobili"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:like "Boris Diaw"->"Tim Duncan"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim Duncan"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | 
<[vertex2]> | <[edge2]> | - - Scenario: Integer Vid pipe - When executing query: - """ - GO FROM hash('Tim Duncan') over serve YIELD serve._src AS id | GET SUBGRAPH WITH PROP FROM $-.id - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:like "Dejounte Murray"->"Danny Green"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:like "Tony Parker"->"LaMarcus 
Aldridge"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Tony Parker"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: Integer Vid var - When executing query: - """ - $a = GO FROM hash('Tim Duncan') over serve YIELD serve._src AS id; - GET SUBGRAPH WITH PROP FROM $a.id - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:like "Dejounte Murray"->"Danny Green"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | 
- | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Tony Parker"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: Integer Vid many steps - When executing query: - """ - GET SUBGRAPH WITH PROP 4 STEPS FROM hash('Yao Ming') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 STEPS FROM hash('NOBODY') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("NOBODY")] | [] | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like - """ - Then define some list 
variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul 
Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason 
Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | 
| [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate 
"Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - When executing query: - """ - GET SUBGRAPH WITH PROP 5 steps from hash('Tony Parker') IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | vertex6 | edge6 | - | [:serve "Tony Parker"->"Hornets"@0] | ("Tim Duncan") | [:serve "Tim Duncan"->"Spurs"@0] | ("Aron Baynes") | [:serve "Aron Baynes"->"Celtics"@0] | ("Yao Ming") | [:serve "Yao Ming"->"Rockets"@0] | ("Grant Hill") | [:serve "Grant Hill"->"Clippers"@0] | ("Steve Nash") | [:serve "Steve Nash"->"Lakers"@0] | - | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | - | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk 
Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | - | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason 
Kidd"->"Knicks"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve 
"Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | - | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | - | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | - | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | [:serve "Russell Westbrook"->"Thunders"@0] | | [:serve "Carmelo Anthony"->"Nuggets"@0] | | [:serve "Vince Carter"->"Nets"@0] | | | - | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:like "James Harden"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Rockets"@0] | | [:serve "Vince Carter"->"Raptors"@0] | | | - | | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Thunders"@0] | | [:serve "Vince Carter"->"Suns"@0] | | | - | | | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"James Harden"@0] | | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | | | - | | | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | | [:like "Vince 
Carter"->"Jason Kidd"@0] | | | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Cavaliers"@0] | | [:serve "Tracy McGrady"->"Magic"@0] | | [:serve "Kobe Bryant"->"Lakers"@0] | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Raptors"@0] | | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Paul Gasol"->"Kobe Bryant"@0] | | | - | | | [:serve "Marco Belinelli"->"76ers"@0] | | [:serve "Danny Green"->"Spurs"@0] | | [:serve "Tracy McGrady"->"Rockets"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Tracy McGrady"->"Spurs"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hawks"@0] | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Grant Hill"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | | [:like "Vince Carter"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | | [:like "Tracy McGrady"->"Grant Hill"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | | [:serve "James Harden"->"Rockets"@0] | | [:serve "Dwyane Wade"->"Bulls"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | | [:serve "James Harden"->"Thunders"@0] | | [:serve "Dwyane Wade"->"Cavaliers"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | | [:like "Luka Doncic"->"James Harden"@0] | | [:serve "Dwyane Wade"->"Heat"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | [:serve "LeBron James"->"Cavaliers"@0] | | [:serve "Dwyane Wade"->"Heat"@1] | | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Heat"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | | | - | | | [:like "Dejounte Murray"->"Marco 
Belinelli"@0] | | [:serve "LeBron James"->"Lakers"@0] | | [:serve "Kyrie Irving"->"Cavaliers"@0] | | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:serve "LeBron James"->"Cavaliers"@1] | | [:serve "Kyrie Irving"->"Celtics"@0] | | | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | [:like "Chris Paul"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Danny Green"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"James Harden"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | [:serve "Chris Paul"->"Clippers"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:serve "Chris Paul"->"Hornets"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:serve "Chris Paul"->"Rockets"@0] | | | | | | | - | | | | | [:like "Blake Griffin"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Carmelo Anthony"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Dwyane Wade"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Carmelo Anthony"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Dwyane Wade"@0] | | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - | <[vertex6]> | <[edge6]> | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 steps from hash('Tim Duncan') BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("LaMarcus 
Aldridge") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Kevin Durant") | [:like "Luka Doncic"->"James Harden"@0] | ("Tracy McGrady") | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Kobe Bryant") | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("James Harden") | [:like "Russell Westbrook"->"James Harden"@0] | ("Carmelo Anthony") | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Dirk Nowitzki") | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Chris Paul") | [:like "James Harden"->"Russell Westbrook"@0] | ("Luka Doncic") | [:like "Tracy McGrady"->"Grant Hill"@0] | ("Grant Hill") | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Danny Green") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Damian Lillard") | [:like "Blake Griffin"->"Chris Paul"@0] | ("Blake Griffin") | [:like "Tracy McGrady"->"Kobe Bryant"@0] | ("Vince Carter") | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago 
Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | - | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | | | - | | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Yao Ming"->"Tracy McGrady"@0] | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | [] | - - Scenario: Integer Vid over end - When executing query: - """ - 
GET SUBGRAPH WITH PROP 10000000000000 STEPS FROM hash('Yao Ming') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH 10000000000000 STEPS FROM hash('Yao Ming') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - - Scenario: Integer Vid many steps without prop - When executing query: - """ - GET SUBGRAPH 4 STEPS FROM hash('Yao Ming') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH 4 STEPS FROM hash('NOBODY') IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("NOBODY")] | [] | - When executing query: - """ - GET SUBGRAPH 4 steps from hash('Yao Ming') IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] 
| ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | 
("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle 
Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | 
| | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | <[edge1]> | - | 
<[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - When executing query: - """ - GET SUBGRAPH 5 steps from hash('Tony Parker') IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | vertex6 | edge6 | - | [:serve "Tony Parker"->"Hornets"@0] | ("Tim Duncan") | [:serve "Tim Duncan"->"Spurs"@0] | ("Aron Baynes") | [:serve "Aron Baynes"->"Celtics"@0] | ("Yao Ming") | [:serve "Yao Ming"->"Rockets"@0] | ("Grant Hill") | [:serve "Grant Hill"->"Clippers"@0] | ("Steve Nash") | [:serve "Steve Nash"->"Lakers"@0] | - | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | - | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | - | [:like 
"Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille 
O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | - | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago 
Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | - | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | - | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | [:serve "Russell Westbrook"->"Thunders"@0] | | [:serve "Carmelo Anthony"->"Nuggets"@0] | | [:serve "Vince Carter"->"Nets"@0] | | | - | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:like "James Harden"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Rockets"@0] | | [:serve "Vince Carter"->"Raptors"@0] | | | - | | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Thunders"@0] | | [:serve "Vince Carter"->"Suns"@0] | | | - | | | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"James Harden"@0] | | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | | | - | | | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | | | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Cavaliers"@0] | | [:serve "Tracy McGrady"->"Magic"@0] | | [:serve "Kobe Bryant"->"Lakers"@0] | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Raptors"@0] | | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Paul Gasol"->"Kobe Bryant"@0] | | | - | | | [:serve "Marco Belinelli"->"76ers"@0] | | [:serve "Danny Green"->"Spurs"@0] | | [:serve "Tracy McGrady"->"Rockets"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Tracy McGrady"->"Spurs"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hawks"@0] | | [:like "Danny Green"->"LeBron James"@0] | 
| [:like "Grant Hill"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | | [:like "Vince Carter"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | | [:like "Tracy McGrady"->"Grant Hill"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | | [:serve "James Harden"->"Rockets"@0] | | [:serve "Dwyane Wade"->"Bulls"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | | [:serve "James Harden"->"Thunders"@0] | | [:serve "Dwyane Wade"->"Cavaliers"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | | [:like "Luka Doncic"->"James Harden"@0] | | [:serve "Dwyane Wade"->"Heat"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | [:serve "LeBron James"->"Cavaliers"@0] | | [:serve "Dwyane Wade"->"Heat"@1] | | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Heat"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Lakers"@0] | | [:serve "Kyrie Irving"->"Cavaliers"@0] | | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:serve "LeBron James"->"Cavaliers"@1] | | [:serve "Kyrie Irving"->"Celtics"@0] | | | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | [:like "Chris Paul"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Danny Green"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"James Harden"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | [:like "LeBron 
James"->"Ray Allen"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | [:serve "Chris Paul"->"Clippers"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:serve "Chris Paul"->"Hornets"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:serve "Chris Paul"->"Rockets"@0] | | | | | | | - | | | | | [:like "Blake Griffin"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Carmelo Anthony"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Dwyane Wade"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Carmelo Anthony"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Dwyane Wade"@0] | | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - | <[vertex6]> | <[edge6]> | - When executing query: - """ - GET SUBGRAPH 4 steps from hash('Tim Duncan') BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Kevin Durant") | [:like "Luka Doncic"->"James Harden"@0] | ("Tracy McGrady") | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Kobe Bryant") | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("James Harden") | [:like "Russell Westbrook"->"James Harden"@0] | ("Carmelo Anthony") | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Dirk Nowitzki") | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Chris Paul") | [:like "James Harden"->"Russell Westbrook"@0] | ("Luka Doncic") | [:like "Tracy McGrady"->"Grant Hill"@0] | ("Grant Hill") | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Danny 
Green") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Damian Lillard") | [:like "Blake Griffin"->"Chris Paul"@0] | ("Blake Griffin") | [:like "Tracy McGrady"->"Kobe Bryant"@0] | ("Vince Carter") | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | - | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | 
- | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | | | - | | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Yao Ming"->"Tracy McGrady"@0] | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | [] | - Scenario: yield Integer Vid zero step When executing query: """ diff --git a/tests/tck/features/subgraph/subgraph.feature b/tests/tck/features/subgraph/subgraph.feature index 622b28b8fae..c90b90017e9 100644 --- a/tests/tck/features/subgraph/subgraph.feature +++ b/tests/tck/features/subgraph/subgraph.feature @@ -9,12 +9,12 @@ Feature: subgraph Scenario: invalid input When executing query: """ - GET SUBGRAPH WITH PROP FROM $-.id + GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: `$-.id', not exist prop `id' When executing query: """ - GET SUBGRAPH WITH PROP FROM $a.id + GET SUBGRAPH WITH PROP FROM $a.id YIELD edges as relationships """ Then a SemanticError should be raised at runtime: `$a.id', not exist variable `a' When executing query: @@ -34,85 +34,85 @@ Feature: subgraph Then 
a SyntaxError should be raised at runtime: please add alias when using `edges'. near `edges' When executing query: """ - GO FROM "Tim Duncan" OVER like YIELD $$.player.age AS id | GET SUBGRAPH WITH PROP FROM $-.id + GO FROM "Tim Duncan" OVER like YIELD $$.player.age AS id | GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: `$-.id', the srcs should be type of FIXED_STRING, but was`INT' When executing query: """ - $a = GO FROM "Tim Duncan" OVER like YIELD $$.player.age AS ID; GET SUBGRAPH WITH PROP FROM $a.ID + $a = GO FROM "Tim Duncan" OVER like YIELD $$.player.age AS ID; GET SUBGRAPH WITH PROP FROM $a.ID YIELD edges as relationships """ Then a SemanticError should be raised at runtime: `$a.ID', the srcs should be type of FIXED_STRING, but was`INT' When executing query: """ - $a = GO FROM "Tim Duncan" OVER like YIELD like._src AS src; GET SUBGRAPH WITH PROP FROM $b.src + $a = GO FROM "Tim Duncan" OVER like YIELD like._src AS src; GET SUBGRAPH WITH PROP FROM $b.src YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: `$b.src', not exist variable `b' When executing query: """ - GO FROM "Tim Duncan" OVER like YIELD like._dst AS id, like._src AS id | GET SUBGRAPH WITH PROP FROM $-.id + GO FROM "Tim Duncan" OVER like YIELD like._dst AS id, like._src AS id | GET SUBGRAPH WITH PROP FROM $-.id YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' When executing query: """ - $a = GO FROM "Tim Duncan" OVER like YIELD like._dst AS id, like._src AS id; GET SUBGRAPH WITH PROP FROM $a.id + $a = GO FROM "Tim Duncan" OVER like YIELD like._dst AS id, like._src AS id; GET SUBGRAPH WITH PROP FROM $a.id YIELD vertices as nodes """ Then a SemanticError should be raised at runtime: Duplicate Column Name : `id' Scenario: zero step When executing query: """ - GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim Duncan" + GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim 
Duncan" YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Tim Duncan")] | When executing query: """ - GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim Duncan", "Spurs" + GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim Duncan", "Spurs" YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Tim Duncan"), ("Spurs")] | When executing query: """ - GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim Duncan", "Tony Parker", "Spurs" + GET SUBGRAPH WITH PROP 0 STEPS FROM "Tim Duncan", "Tony Parker", "Spurs" YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Tim Duncan"), ("Spurs"), ("Tony Parker")] | When executing query: """ - GO FROM 'Tim Duncan' over serve YIELD serve._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id + GO FROM 'Tim Duncan' over serve YIELD serve._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Spurs")] | When executing query: """ - GO FROM 'Tim Duncan' over like YIELD like._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id + GO FROM 'Tim Duncan' over like YIELD like._dst AS id | GET SUBGRAPH WITH PROP 0 STEPS FROM $-.id YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Manu Ginobili"), ("Tony Parker")] | When executing query: """ - $a = GO FROM 'Tim Duncan' over serve YIELD serve._dst AS id; GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id + $a = GO FROM 'Tim Duncan' over serve YIELD serve._dst AS id; GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Spurs")] | When executing query: """ - $a = GO FROM 'Tim Duncan' over like YIELD like._dst AS id; 
GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id + $a = GO FROM 'Tim Duncan' over like YIELD like._dst AS id; GET SUBGRAPH WITH PROP 0 STEPS FROM $a.id YIELD vertices as nodes """ Then the result should be, in any order, with relax comparison: - | _vertices | + | nodes | | [("Manu Ginobili"), ("Tony Parker")] | Scenario: subgraph When executing query: """ - GET SUBGRAPH WITH PROP FROM 'Tim Duncan' + GET SUBGRAPH WITH PROP FROM 'Tim Duncan' YIELD vertices as nodes, edges as relationships """ Then define some list variables: | edge1 | vertex2 | edge2 | @@ -142,14 +142,14 @@ Feature: subgraph | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | | [:serve "Danny Green"->"Spurs"@0] | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | + | nodes | relationships | + | [("Tim Duncan")] | <[edge1]> | + | <[vertex2]> | <[edge2]> | Scenario: two steps When executing query: """ - GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' + GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' YIELD vertices as nodes, edges as relationships """ Then define some list variables: | edge1 | vertex2 | edge2 | vertex3 | edge3 | @@ -224,15 +224,15 @@ Feature: subgraph | | | [:serve "Tiago Splitter"->"76ers"@0] | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | + | nodes | relationships | + | [("Tim Duncan")] | <[edge1]> | + | <[vertex2]> | <[edge2]> | + | <[vertex3]> | <[edge3]> | Scenario: in edge When executing query: """ - GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' IN like, serve + GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' IN like, serve YIELD vertices as nodes, edges as relationships """ Then define some list variables: | edge1 | vertex2 | edge2 | vertex3 | @@ -254,15 +254,15 @@ Feature: subgraph | | | 
[:like "Marco Belinelli"->"Tony Parker"@0] | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | [] | + | nodes | relationships | + | [("Tim Duncan")] | <[edge1]> | + | <[vertex2]> | <[edge2]> | + | <[vertex3]> | [] | Scenario: in and out edge When executing query: """ - GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' IN like OUT serve + GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan' IN like OUT serve YIELD vertices as nodes, edges as relationships """ Then define some list variables: | edge1 | vertex2 | edge2 | vertex3 | edge3 | @@ -320,15 +320,15 @@ Feature: subgraph | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | + | nodes | relationships | + | [("Tim Duncan")] | <[edge1]> | + | <[vertex2]> | <[edge2]> | + | <[vertex3]> | <[edge3]> | Scenario: two steps in and out edge When executing query: """ - GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan', 'James Harden' IN teammate OUT serve + GET SUBGRAPH WITH PROP 2 STEPS FROM 'Tim Duncan', 'James Harden' IN teammate OUT serve YIELD vertices as nodes, edges as relationships """ Then define some list variables: | vertex1 | edge1 | vertex2 | edge2 | vertex3 | @@ -340,15 +340,15 @@ Feature: subgraph | | | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | | | | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | <[vertex1]> | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | [] | + | nodes | relationships | + | <[vertex1]> | <[edge1]> | + | <[vertex2]> | <[edge2]> | + | <[vertex3]> | [] | Scenario: three steps When 
executing query: """ - GET SUBGRAPH WITH PROP 3 STEPS FROM 'Paul George' OUT serve BOTH like + GET SUBGRAPH WITH PROP 3 STEPS FROM 'Paul George' OUT serve BOTH like YIELD vertices as nodes, edges as relationships """ Then define some list variables: | edge1 | edge2 | edge3 | vertex4 | edge4 | @@ -374,11 +374,11 @@ Feature: subgraph | | | | | [:serve "Chris Paul"->"Rockets"@0] | | | | | | [:like "Chris Paul"->"LeBron James"@0] | Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Paul George")] | <[edge1]> | - | [("Russell Westbrook"), ("Pacers"), ("Thunders")] | <[edge2]> | - | [("Dejounte Murray"), ("James Harden")] | <[edge3]> | - | <[vertex4]> | <[edge4]> | + | nodes | relationships | + | [("Paul George")] | <[edge1]> | + | [("Russell Westbrook"), ("Pacers"), ("Thunders")] | <[edge2]> | + | [("Dejounte Murray"), ("James Harden")] | <[edge3]> | + | <[vertex4]> | <[edge4]> | Scenario: yield bidirect edge When executing query: @@ -1035,535 +1035,6 @@ Feature: subgraph | [("Tom")] | [] | | [] | [] | - Scenario: bidirect edge - When executing query: - """ - GET SUBGRAPH WITH PROP FROM 'Tony Parker' BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Manu Ginobili"->"Tim Duncan"@0] | - | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Tim Duncan") | [:like "Marco Belinelli"->"Tim Duncan"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Tim Duncan"->"Manu Ginobili"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:like "Boris Diaw"->"Tim Duncan"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Boris Diaw") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"Tim 
Duncan"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: pipe - When executing query: - """ - GO FROM 'Tim Duncan' over serve YIELD serve._src AS id | GET SUBGRAPH WITH PROP FROM $-.id - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:like "Dejounte Murray"->"Danny Green"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:like 
"LaMarcus Aldridge"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Tony Parker"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: var - When executing query: - """ - $a = GO FROM 'Tim Duncan' over serve YIELD serve._src AS id; - GET SUBGRAPH WITH PROP FROM $a.id - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | - | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Danny Green") | [:like "Dejounte Murray"->"Danny Green"@0] | - | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Marco Belinelli"->"Danny Green"@0] | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Danny Green"->"Marco Belinelli"@0] | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:serve "Danny Green"->"Spurs"@0] | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Shaquille O\'Neal") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Spurs") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | - | [:like "Marco 
Belinelli"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:serve "Manu Ginobili"->"Spurs"@0] | - | [:like "Shaquille O\'Neal"->"Tim Duncan"@0] | ("Marco Belinelli") | [:teammate "Manu Ginobili"->"Tony Parker"@0] | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Aron Baynes"->"Spurs"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Boris Diaw"->"Spurs"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | - | [:serve "Tim Duncan"->"Spurs"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | - | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | - | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:serve "Tony Parker"->"Spurs"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | - | | | [:serve "Tiago Splitter"->"Spurs"@0] | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - - Scenario: many steps - When executing query: - """ - GET SUBGRAPH WITH PROP 4 STEPS FROM 'Yao Ming' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 STEPS FROM 'NOBODY' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | 
_edges | - | [("NOBODY")] | [] | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like 
"James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason 
Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale 
McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve 
"Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - When executing query: - """ - GET SUBGRAPH WITH PROP 5 steps from 'Tony Parker' IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | vertex6 | edge6 | - | [:serve "Tony Parker"->"Hornets"@0] | ("Tim Duncan") | [:serve "Tim Duncan"->"Spurs"@0] | ("Aron Baynes") | [:serve "Aron Baynes"->"Celtics"@0] | ("Yao Ming") | [:serve "Yao Ming"->"Rockets"@0] | ("Grant Hill") | [:serve "Grant Hill"->"Clippers"@0] | ("Steve Nash") | [:serve "Steve Nash"->"Lakers"@0] | - | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | - | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus 
Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") | [:like "Jason Kidd"->"Steve Nash"@0] | - | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | 
[:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul 
Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | - | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | - | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | - | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | [:serve "Russell Westbrook"->"Thunders"@0] | | [:serve "Carmelo Anthony"->"Nuggets"@0] | | [:serve "Vince Carter"->"Nets"@0] | | | - | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:like "James Harden"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Rockets"@0] | | [:serve "Vince Carter"->"Raptors"@0] | | | - | | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Thunders"@0] | | [:serve "Vince Carter"->"Suns"@0] | | | - | | | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"James Harden"@0] | | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | 
| | - | | | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | | | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Cavaliers"@0] | | [:serve "Tracy McGrady"->"Magic"@0] | | [:serve "Kobe Bryant"->"Lakers"@0] | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Raptors"@0] | | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Paul Gasol"->"Kobe Bryant"@0] | | | - | | | [:serve "Marco Belinelli"->"76ers"@0] | | [:serve "Danny Green"->"Spurs"@0] | | [:serve "Tracy McGrady"->"Rockets"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Tracy McGrady"->"Spurs"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hawks"@0] | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Grant Hill"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | | [:like "Vince Carter"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | | [:like "Tracy McGrady"->"Grant Hill"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | | [:serve "James Harden"->"Rockets"@0] | | [:serve "Dwyane Wade"->"Bulls"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | | [:serve "James Harden"->"Thunders"@0] | | [:serve "Dwyane Wade"->"Cavaliers"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | | [:like "Luka Doncic"->"James Harden"@0] | | [:serve "Dwyane Wade"->"Heat"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | [:serve "LeBron James"->"Cavaliers"@0] | | [:serve "Dwyane Wade"->"Heat"@1] | | | | | - | | | [:like 
"Danny Green"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Heat"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Lakers"@0] | | [:serve "Kyrie Irving"->"Cavaliers"@0] | | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:serve "LeBron James"->"Cavaliers"@1] | | [:serve "Kyrie Irving"->"Celtics"@0] | | | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | [:like "Chris Paul"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Danny Green"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"James Harden"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | [:serve "Chris Paul"->"Clippers"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:serve "Chris Paul"->"Hornets"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:serve "Chris Paul"->"Rockets"@0] | | | | | | | - | | | | | [:like "Blake Griffin"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Carmelo Anthony"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Dwyane Wade"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Carmelo Anthony"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Dwyane Wade"@0] | | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - | <[vertex6]> | <[edge6]> | - When executing query: - """ - GET SUBGRAPH WITH PROP 4 steps from 'Tim Duncan' BOTH like - """ - Then 
define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Kevin Durant") | [:like "Luka Doncic"->"James Harden"@0] | ("Tracy McGrady") | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Kobe Bryant") | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("James Harden") | [:like "Russell Westbrook"->"James Harden"@0] | ("Carmelo Anthony") | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Dirk Nowitzki") | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Chris Paul") | [:like "James Harden"->"Russell Westbrook"@0] | ("Luka Doncic") | [:like "Tracy McGrady"->"Grant Hill"@0] | ("Grant Hill") | - | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Danny Green") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Damian Lillard") | [:like "Blake Griffin"->"Chris Paul"@0] | ("Blake Griffin") | [:like "Tracy McGrady"->"Kobe Bryant"@0] | ("Vince Carter") | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | 
("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | - | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | | | - | | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Yao Ming"->"Tracy McGrady"@0] | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | 
<[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | [] | - - Scenario: over end - When executing query: - """ - GET SUBGRAPH WITH PROP 10000000000000 STEPS FROM 'Yao Ming' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH 10000000000000 STEPS FROM 'Yao Ming' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - - Scenario: many steps without prop - When executing query: - """ - GET SUBGRAPH 4 STEPS FROM 'Yao Ming' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Yao Ming")] | [[:serve "Yao Ming"->"Rockets"@0]] | - | [("Rockets")] | [] | - When executing query: - """ - GET SUBGRAPH 4 STEPS FROM 'NOBODY' IN teammate OUT serve - """ - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("NOBODY")] | [] | - When executing query: - """ - GET SUBGRAPH 4 steps from 'Yao Ming' IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | - | [:serve "Yao Ming"->"Rockets"@0] | ("Shaquille O'Neal") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Kobe Bryant") | [:serve "Kobe Bryant"->"Lakers"@0] | ("Manu Ginobili") | [:serve "Manu Ginobili"->"Spurs"@0] | ("Dirk Nowitzki") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | - | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Tracy McGrady") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Grant Hill") | [:like "Paul Gasol"->"Kobe Bryant"@0] | ("Paul Gasol") | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | ("Kevin Durant") | [:serve "Kevin Durant"->"Warriors"@0] | - | 
[:like "Yao Ming"->"Tracy McGrady"@0] | ("Rockets") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Vince Carter") | [:serve "Grant Hill"->"Clippers"@0] | ("Jason Kidd") | [:teammate "Tony Parker"->"Manu Ginobili"@0] | ("Damian Lillard") | [:serve "Damian Lillard"->"Trail Blazers"@0] | - | | | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Tim Duncan") | [:serve "Grant Hill"->"Magic"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Manu Ginobili"@0] | ("James Harden") | [:serve "James Harden"->"Rockets"@0] | - | | | [:serve "Shaquille O'Neal"->"Magic"@0] | ("JaVale McGee") | [:serve "Grant Hill"->"Pistons"@0] | ("Marco Belinelli") | [:like "Tiago Splitter"->"Manu Ginobili"@0] | ("Chris Paul") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | - | | | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Rudy Gay") | [:serve "Grant Hill"->"Suns"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"Manu Ginobili"@0] | ("LeBron James") | [:like "Russell Westbrook"->"James Harden"@0] | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Magic") | [:serve "Vince Carter"->"Grizzlies"@0] | ("Aron Baynes") | [:serve "Paul Gasol"->"Bucks"@0] | ("Steve Nash") | [:like "James Harden"->"Russell Westbrook"@0] | - | | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Spurs") | [:serve "Vince Carter"->"Hawks"@0] | ("Boris Diaw") | [:serve "Paul Gasol"->"Bulls"@0] | ("Marc Gasol") | [:serve "Chris Paul"->"Clippers"@0] | - | | | [:serve "Tracy McGrady"->"Magic"@0] | ("Celtics") | [:serve "Vince Carter"->"Kings"@0] | ("Danny Green") | [:serve "Paul Gasol"->"Grizzlies"@0] | ("Kyle Anderson") | [:serve "Chris Paul"->"Hornets"@0] | - | | | [:serve "Tracy McGrady"->"Raptors"@0] | ("Heat") | [:serve "Vince Carter"->"Magic"@0] | ("LaMarcus Aldridge") | [:serve "Paul Gasol"->"Lakers"@0] | ("Russell Westbrook") | [:serve "Chris Paul"->"Rockets"@0] | - | | | [:serve "Tracy McGrady"->"Rockets"@0] | ("Suns") | [:serve "Vince Carter"->"Mavericks"@0] | ("Tiago Splitter") | [:serve "Paul Gasol"->"Spurs"@0] | ("76ers") | 
[:serve "Dirk Nowitzki"->"Mavericks"@0] | - | | | [:serve "Tracy McGrady"->"Spurs"@0] | ("Lakers") | [:serve "Vince Carter"->"Nets"@0] | ("Pistons") | [:like "Marc Gasol"->"Paul Gasol"@0] | ("Hornets") | [:like "Chris Paul"->"LeBron James"@0] | - | | | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Cavaliers") | [:serve "Vince Carter"->"Raptors"@0] | ("Nets") | [:like "Paul Gasol"->"Marc Gasol"@0] | ("Bucks") | [:serve "Steve Nash"->"Lakers"@0] | - | | | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Raptors") | [:serve "Vince Carter"->"Suns"@0] | ("Kings") | [:serve "Jason Kidd"->"Knicks"@0] | ("Knicks") | [:serve "Steve Nash"->"Mavericks"@0] | - | | | [:like "Tracy McGrady"->"Grant Hill"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | ("Clippers") | [:serve "Jason Kidd"->"Mavericks"@0] | ("Bulls") | [:serve "Steve Nash"->"Suns"@0] | - | | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | ("Mavericks") | [:serve "Jason Kidd"->"Nets"@0] | ("Trail Blazers") | [:serve "Steve Nash"->"Suns"@1] | - | | | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:serve "Tim Duncan"->"Spurs"@0] | ("Hawks") | [:serve "Jason Kidd"->"Suns"@0] | ("Jazz") | [:serve "LeBron James"->"Cavaliers"@1] | - | | | | | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Warriors") | [:serve "Jason Kidd"->"Mavericks"@1] | | [:serve "LeBron James"->"Lakers"@0] | - | | | | | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Nuggets") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "LeBron James"->"Heat"@0] | - | | | | | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Grizzlies") | [:like "Steve Nash"->"Jason Kidd"@0] | | [:serve "Marc Gasol"->"Grizzlies"@0] | - | | | | | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Wizards") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Marc Gasol"->"Raptors"@0] | - | | | | | [:like "Danny Green"->"Tim Duncan"@0] | | [:like "Jason Kidd"->"Steve Nash"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | - | | | | | [:like "Dejounte Murray"->"Tim 
Duncan"@0] | | [:serve "Tony Parker"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | - | | | | | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | | [:serve "Tony Parker"->"Spurs"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | - | | | | | [:like "Manu Ginobili"->"Tim Duncan"@0] | | [:teammate "Manu Ginobili"->"Tony Parker"@0] | | [:serve "LeBron James"->"Cavaliers"@0] | - | | | | | [:like "Marco Belinelli"->"Tim Duncan"@0] | | [:teammate "Tim Duncan"->"Tony Parker"@0] | | | - | | | | | [:like "Tiago Splitter"->"Tim Duncan"@0] | | [:like "Boris Diaw"->"Tony Parker"@0] | | | - | | | | | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Dejounte Murray"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | | | - | | | | | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | - | | | | | [:serve "JaVale McGee"->"Lakers"@0] | | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Marco Belinelli"->"76ers"@0] | | | - | | | | | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Marco Belinelli"->"Bulls"@0] | | | - | | | | | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Marco Belinelli"->"Hawks"@0] | | | - | | | | | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Marco Belinelli"->"Hornets"@0] | | | - | | | | | [:serve "Rudy Gay"->"Grizzlies"@0] | | [:serve "Marco Belinelli"->"Kings"@0] | | | - | | | | | [:serve "Rudy Gay"->"Kings"@0] | | [:serve "Marco Belinelli"->"Raptors"@0] | | | - | | | | | [:serve "Rudy Gay"->"Raptors"@0] | | [:serve "Marco Belinelli"->"Spurs"@0] | | | - | | | | | [:serve "Rudy Gay"->"Spurs"@0] | | [:serve "Marco Belinelli"->"Warriors"@0] | | | - | | | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:serve "Marco Belinelli"->"Hornets"@1] | | | - | | | | | | | [:serve "Marco Belinelli"->"Spurs"@1] | | | - | | | | | | | [:like "Danny Green"->"Marco Belinelli"@0] | 
| | - | | | | | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | | - | | | | | | | [:like "Marco Belinelli"->"Danny Green"@0] | | | - | | | | | | | [:serve "Dejounte Murray"->"Spurs"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Danny Green"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"James Harden"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kevin Durant"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"LeBron James"@0] | | | - | | | | | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Celtics"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Pistons"@0] | | | - | | | | | | | [:serve "Aron Baynes"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hawks"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Hornets"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Jazz"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Spurs"@0] | | | - | | | | | | | [:serve "Boris Diaw"->"Suns"@0] | | | - | | | | | | | [:serve "Danny Green"->"Cavaliers"@0] | | | - | | | | | | | [:serve "Danny Green"->"Raptors"@0] | | | - | | | | | | | [:serve "Danny Green"->"Spurs"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"Danny Green"@0] | | | - | | | | | | | [:like "Danny Green"->"LeBron James"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | | | - | | | | | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | | | - | | | | | | | [:teammate "Tim Duncan"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"76ers"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Hawks"@0] | | | - | | | | | | | [:serve "Tiago Splitter"->"Spurs"@0] | | | - Then the result should be, in any order, with relax comparison: - | 
_vertices | _edges | - | [("Yao Ming")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - When executing query: - """ - GET SUBGRAPH 5 steps from 'Tony Parker' IN teammate OUT serve BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | edge5 | vertex6 | edge6 | - | [:serve "Tony Parker"->"Hornets"@0] | ("Tim Duncan") | [:serve "Tim Duncan"->"Spurs"@0] | ("Aron Baynes") | [:serve "Aron Baynes"->"Celtics"@0] | ("Yao Ming") | [:serve "Yao Ming"->"Rockets"@0] | ("Grant Hill") | [:serve "Grant Hill"->"Clippers"@0] | ("Steve Nash") | [:serve "Steve Nash"->"Lakers"@0] | - | [:serve "Tony Parker"->"Spurs"@0] | ("Boris Diaw") | [:teammate "Manu Ginobili"->"Tim Duncan"@0] | ("Rudy Gay") | [:serve "Aron Baynes"->"Pistons"@0] | ("Ray Allen") | [:like "Yao Ming"->"Tracy McGrady"@0] | ("Kristaps Porzingis") | [:serve "Grant Hill"->"Magic"@0] | ("Paul Gasol") | [:serve "Steve Nash"->"Mavericks"@0] | - | [:teammate "Manu Ginobili"->"Tony Parker"@0] | ("LaMarcus Aldridge") | [:teammate "Tony Parker"->"Tim Duncan"@0] | ("Damian Lillard") | [:serve "Aron Baynes"->"Spurs"@0] | ("Blake Griffin") | [:serve "Ray Allen"->"Bucks"@0] | ("Dirk Nowitzki") | [:serve "Grant Hill"->"Pistons"@0] | ("Jason Kidd") | [:serve "Steve Nash"->"Suns"@0] | - | [:teammate "Tim Duncan"->"Tony Parker"@0] | ("Manu Ginobili") | [:like "Aron Baynes"->"Tim Duncan"@0] | ("Kevin Durant") | [:serve "Rudy Gay"->"Grizzlies"@0] | ("Paul George") | [:serve "Ray Allen"->"Celtics"@0] | ("Rajon Rondo") | [:serve "Grant Hill"->"Suns"@0] | ("Pelicans") | [:serve "Steve Nash"->"Suns"@1] | - | [:like "Boris Diaw"->"Tony Parker"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:serve "Rudy Gay"->"Kings"@0] | ("JaVale McGee") | [:serve "Ray Allen"->"Heat"@0] | ("Vince Carter") | [:serve "Kristaps Porzingis"->"Knicks"@0] | ("Nets") 
| [:like "Jason Kidd"->"Steve Nash"@0] | - | [:like "Dejounte Murray"->"Tony Parker"@0] | ("Dejounte Murray") | [:like "Danny Green"->"Tim Duncan"@0] | ("Tiago Splitter") | [:serve "Rudy Gay"->"Raptors"@0] | ("Luka Doncic") | [:serve "Ray Allen"->"Thunders"@0] | ("Kobe Bryant") | [:serve "Kristaps Porzingis"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Spurs"@0] | - | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Hornets") | [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Russell Westbrook") | [:serve "Rudy Gay"->"Spurs"@0] | ("Carmelo Anthony") | [:like "Rajon Rondo"->"Ray Allen"@0] | ("Wizards") | [:serve "Dirk Nowitzki"->"Mavericks"@0] | | [:like "Steve Nash"->"Jason Kidd"@0] | - | [:like "Marco Belinelli"->"Tony Parker"@0] | ("Spurs") | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Danny Green") | [:like "Tracy McGrady"->"Rudy Gay"@0] | ("Tracy McGrady") | [:like "Ray Allen"->"Rajon Rondo"@0] | ("Pacers") | [:like "Jason Kidd"->"Dirk Nowitzki"@0] | | [:serve "Paul Gasol"->"Lakers"@0] | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Kyle Anderson") | [:serve "Damian Lillard"->"Trail Blazers"@0] | ("Dwyane Wade") | [:serve "Blake Griffin"->"Clippers"@0] | ("Knicks") | [:like "Steve Nash"->"Dirk Nowitzki"@0] | | [:serve "Jason Kidd"->"Knicks"@0] | - | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("James Harden") | [:serve "Kevin Durant"->"Thunders"@0] | ("Kyrie Irving") | [:serve "Blake Griffin"->"Pistons"@0] | ("Bucks") | [:like "Dirk Nowitzki"->"Jason Kidd"@0] | | [:serve "Jason Kidd"->"Mavericks"@0] | - | [:like "Tony Parker"->"Manu Ginobili"@0] | | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("LeBron James") | [:serve "Kevin Durant"->"Warriors"@0] | ("Cavaliers") | [:serve "Paul George"->"Pacers"@0] | ("Mavericks") | [:like "Dirk Nowitzki"->"Steve Nash"@0] | | [:serve "Jason Kidd"->"Nets"@0] | - | [:like "Tony Parker"->"Tim Duncan"@0] | | [:like "Tiago Splitter"->"Tim 
Duncan"@0] | ("Chris Paul") | [:serve "Shaquille O'Neal"->"Cavaliers"@0] | ("Celtics") | [:serve "Paul George"->"Thunders"@0] | ("Nuggets") | [:serve "Rajon Rondo"->"Bulls"@0] | | [:serve "Jason Kidd"->"Suns"@0] | - | | | [:like "Tim Duncan"->"Manu Ginobili"@0] | ("Bulls") | [:serve "Shaquille O'Neal"->"Celtics"@0] | ("Pistons") | [:serve "JaVale McGee"->"Lakers"@0] | | [:serve "Rajon Rondo"->"Celtics"@0] | | [:serve "Jason Kidd"->"Mavericks"@1] | - | | | [:serve "Boris Diaw"->"Hawks"@0] | ("Jazz") | [:serve "Shaquille O'Neal"->"Heat"@0] | ("Grizzlies") | [:serve "JaVale McGee"->"Mavericks"@0] | | [:serve "Rajon Rondo"->"Kings"@0] | | [:serve "Paul Gasol"->"Bucks"@0] | - | | | [:serve "Boris Diaw"->"Hornets"@0] | ("Hawks") | [:serve "Shaquille O'Neal"->"Lakers"@0] | ("Heat") | [:serve "JaVale McGee"->"Nuggets"@0] | | [:serve "Rajon Rondo"->"Lakers"@0] | | [:serve "Paul Gasol"->"Bulls"@0] | - | | | [:serve "Boris Diaw"->"Jazz"@0] | ("Warriors") | [:serve "Shaquille O'Neal"->"Magic"@0] | ("Magic") | [:serve "JaVale McGee"->"Warriors"@0] | | [:serve "Rajon Rondo"->"Mavericks"@0] | | [:serve "Paul Gasol"->"Grizzlies"@0] | - | | | [:serve "Boris Diaw"->"Spurs"@0] | ("Suns") | [:serve "Shaquille O'Neal"->"Suns"@0] | ("Lakers") | [:serve "JaVale McGee"->"Wizards"@0] | | [:serve "Rajon Rondo"->"Pelicans"@0] | | | - | | | [:serve "Boris Diaw"->"Suns"@0] | ("Trail Blazers") | [:like "Yao Ming"->"Shaquille O'Neal"@0] | ("Clippers") | [:serve "Luka Doncic"->"Mavericks"@0] | | [:serve "Vince Carter"->"Grizzlies"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Spurs"@0] | ("Kings") | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | ("Thunders") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | [:serve "Vince Carter"->"Hawks"@0] | | | - | | | [:serve "LaMarcus Aldridge"->"Trail Blazers"@0] | ("Raptors") | [:serve "Tiago Splitter"->"76ers"@0] | ("Rockets") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | [:serve "Vince Carter"->"Kings"@0] | | | - | | | [:teammate "Tim 
Duncan"->"LaMarcus Aldridge"@0] | ("76ers") | [:serve "Tiago Splitter"->"Hawks"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | [:serve "Vince Carter"->"Magic"@0] | | | - | | | [:teammate "Tony Parker"->"LaMarcus Aldridge"@0] | | [:serve "Tiago Splitter"->"Spurs"@0] | | [:serve "Carmelo Anthony"->"Knicks"@0] | | [:serve "Vince Carter"->"Mavericks"@0] | | | - | | | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | | [:serve "Russell Westbrook"->"Thunders"@0] | | [:serve "Carmelo Anthony"->"Nuggets"@0] | | [:serve "Vince Carter"->"Nets"@0] | | | - | | | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | | [:like "James Harden"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Rockets"@0] | | [:serve "Vince Carter"->"Raptors"@0] | | | - | | | [:serve "Manu Ginobili"->"Spurs"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | [:serve "Carmelo Anthony"->"Thunders"@0] | | [:serve "Vince Carter"->"Suns"@0] | | | - | | | [:teammate "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"James Harden"@0] | | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | | [:like "Jason Kidd"->"Vince Carter"@0] | | | - | | | [:teammate "Tony Parker"->"Manu Ginobili"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | | [:like "Vince Carter"->"Jason Kidd"@0] | | | - | | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Cavaliers"@0] | | [:serve "Tracy McGrady"->"Magic"@0] | | [:serve "Kobe Bryant"->"Lakers"@0] | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | [:serve "Danny Green"->"Raptors"@0] | | [:serve "Tracy McGrady"->"Raptors"@0] | | [:like "Paul Gasol"->"Kobe Bryant"@0] | | | - | | | [:serve "Marco Belinelli"->"76ers"@0] | | [:serve "Danny Green"->"Spurs"@0] | | [:serve "Tracy McGrady"->"Rockets"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Bulls"@0] | | [:teammate "Tim Duncan"->"Danny Green"@0] | | [:serve "Tracy McGrady"->"Spurs"@0] | | | | | - | | | [:serve "Marco 
Belinelli"->"Hawks"@0] | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Grant Hill"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@0] | | [:serve "Kyle Anderson"->"Grizzlies"@0] | | [:like "Vince Carter"->"Tracy McGrady"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Kings"@0] | | [:serve "Kyle Anderson"->"Spurs"@0] | | [:like "Tracy McGrady"->"Grant Hill"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Raptors"@0] | | [:teammate "Tony Parker"->"Kyle Anderson"@0] | | [:like "Tracy McGrady"->"Kobe Bryant"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@0] | | [:serve "James Harden"->"Rockets"@0] | | [:serve "Dwyane Wade"->"Bulls"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Warriors"@0] | | [:serve "James Harden"->"Thunders"@0] | | [:serve "Dwyane Wade"->"Cavaliers"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Hornets"@1] | | [:like "Luka Doncic"->"James Harden"@0] | | [:serve "Dwyane Wade"->"Heat"@0] | | | | | - | | | [:serve "Marco Belinelli"->"Spurs"@1] | | [:serve "LeBron James"->"Cavaliers"@0] | | [:serve "Dwyane Wade"->"Heat"@1] | | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Heat"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | | | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:serve "LeBron James"->"Lakers"@0] | | [:serve "Kyrie Irving"->"Cavaliers"@0] | | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:serve "LeBron James"->"Cavaliers"@1] | | [:serve "Kyrie Irving"->"Celtics"@0] | | | | | - | | | [:serve "Dejounte Murray"->"Spurs"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Chris Paul"@0] | | [:like "Chris Paul"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Danny Green"@0] | | [:like "Dwyane Wade"->"LeBron James"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"James Harden"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | | | | - | | | 
[:like "Dejounte Murray"->"Kevin Durant"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Kyle Anderson"@0] | | [:serve "Chris Paul"->"Clippers"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:serve "Chris Paul"->"Hornets"@0] | | | | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:serve "Chris Paul"->"Rockets"@0] | | | | | | | - | | | | | [:like "Blake Griffin"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Carmelo Anthony"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Dwyane Wade"->"Chris Paul"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Carmelo Anthony"@0] | | | | | | | - | | | | | [:like "Chris Paul"->"Dwyane Wade"@0] | | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tony Parker")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | <[edge5]> | - | <[vertex6]> | <[edge6]> | - When executing query: - """ - GET SUBGRAPH 4 steps from 'Tim Duncan' BOTH like - """ - Then define some list variables: - | edge1 | vertex2 | edge2 | vertex3 | edge3 | vertex4 | edge4 | vertex5 | - | [:like "Aron Baynes"->"Tim Duncan"@0] | ("LaMarcus Aldridge") | [:like "Damian Lillard"->"LaMarcus Aldridge"@0] | ("Kevin Durant") | [:like "Luka Doncic"->"James Harden"@0] | ("Tracy McGrady") | [:like "Grant Hill"->"Tracy McGrady"@0] | ("Kobe Bryant") | - | [:like "Boris Diaw"->"Tim Duncan"@0] | ("Boris Diaw") | [:like "Rudy Gay"->"LaMarcus Aldridge"@0] | ("James Harden") | [:like "Russell Westbrook"->"James Harden"@0] | ("Carmelo Anthony") | [:like "Vince Carter"->"Tracy McGrady"@0] | ("Dirk Nowitzki") | - | [:like "Danny Green"->"Tim Duncan"@0] | ("Dejounte Murray") | [:like "Tony Parker"->"LaMarcus Aldridge"@0] | ("Chris Paul") | [:like "James Harden"->"Russell Westbrook"@0] | ("Luka Doncic") | [:like "Tracy McGrady"->"Grant Hill"@0] | ("Grant Hill") | - 
| [:like "Dejounte Murray"->"Tim Duncan"@0] | ("Danny Green") | [:like "LaMarcus Aldridge"->"Tony Parker"@0] | ("Damian Lillard") | [:like "Blake Griffin"->"Chris Paul"@0] | ("Blake Griffin") | [:like "Tracy McGrady"->"Kobe Bryant"@0] | ("Vince Carter") | - | [:like "LaMarcus Aldridge"->"Tim Duncan"@0] | ("Marco Belinelli") | [:like "Boris Diaw"->"Tony Parker"@0] | ("Rudy Gay") | [:like "Carmelo Anthony"->"Chris Paul"@0] | ("Dwyane Wade") | [:like "Dwyane Wade"->"Carmelo Anthony"@0] | ("Rajon Rondo") | - | [:like "Manu Ginobili"->"Tim Duncan"@0] | ("Aron Baynes") | [:like "Dejounte Murray"->"Chris Paul"@0] | ("Kyle Anderson") | [:like "Dwyane Wade"->"Chris Paul"@0] | ("Kyrie Irving") | [:like "Carmelo Anthony"->"Dwyane Wade"@0] | ("Kristaps Porzingis") | - | [:like "Marco Belinelli"->"Tim Duncan"@0] | ("Manu Ginobili") | [:like "Dejounte Murray"->"Danny Green"@0] | ("LeBron James") | [:like "Chris Paul"->"Carmelo Anthony"@0] | ("Ray Allen") | [:like "Kristaps Porzingis"->"Luka Doncic"@0] | | - | [:like "Shaquille O'Neal"->"Tim Duncan"@0] | ("Tiago Splitter") | [:like "Dejounte Murray"->"James Harden"@0] | ("Russell Westbrook") | [:like "Chris Paul"->"Dwyane Wade"@0] | ("Paul George") | [:like "Luka Doncic"->"Dirk Nowitzki"@0] | | - | [:like "Tiago Splitter"->"Tim Duncan"@0] | ("Shaquille O'Neal") | [:like "Dejounte Murray"->"Kevin Durant"@0] | ("Yao Ming") | [:like "Chris Paul"->"LeBron James"@0] | | [:like "Luka Doncic"->"Kristaps Porzingis"@0] | | - | [:like "Tony Parker"->"Tim Duncan"@0] | ("Tony Parker") | [:like "Dejounte Murray"->"Kyle Anderson"@0] | ("JaVale McGee") | [:like "Tracy McGrady"->"Rudy Gay"@0] | | [:like "Dirk Nowitzki"->"Dwyane Wade"@0] | | - | [:like "Tim Duncan"->"Manu Ginobili"@0] | | [:like "Dejounte Murray"->"LeBron James"@0] | | [:like "Carmelo Anthony"->"LeBron James"@0] | | [:like "Rajon Rondo"->"Ray Allen"@0] | | - | [:like "Tim Duncan"->"Tony Parker"@0] | | [:like "Dejounte Murray"->"Manu Ginobili"@0] | | [:like "Dwyane Wade"->"LeBron 
James"@0] | | [:like "Ray Allen"->"Rajon Rondo"@0] | | - | | | [:like "Dejounte Murray"->"Marco Belinelli"@0] | | [:like "Kyrie Irving"->"LeBron James"@0] | | | | - | | | [:like "Dejounte Murray"->"Russell Westbrook"@0] | | [:like "LeBron James"->"Ray Allen"@0] | | | | - | | | [:like "Dejounte Murray"->"Tony Parker"@0] | | [:like "Paul George"->"Russell Westbrook"@0] | | | | - | | | [:like "Marco Belinelli"->"Danny Green"@0] | | [:like "Russell Westbrook"->"Paul George"@0] | | | | - | | | [:like "Danny Green"->"LeBron James"@0] | | [:like "Yao Ming"->"Tracy McGrady"@0] | | | | - | | | [:like "Danny Green"->"Marco Belinelli"@0] | | | | | | - | | | [:like "Marco Belinelli"->"Tony Parker"@0] | | | | | | - | | | [:like "Tiago Splitter"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Tony Parker"->"Manu Ginobili"@0] | | | | | | - | | | [:like "Yao Ming"->"Shaquille O'Neal"@0] | | | | | | - | | | [:like "Shaquille O'Neal"->"JaVale McGee"@0] | | | | | | - Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tim Duncan")] | <[edge1]> | - | <[vertex2]> | <[edge2]> | - | <[vertex3]> | <[edge3]> | - | <[vertex4]> | <[edge4]> | - | <[vertex5]> | [] | - Scenario: Get subgraph in a space which doesn't have edge schema Given an empty graph And create a space with following options: @@ -1581,9 +1052,9 @@ Feature: subgraph Then the execution should be successful When executing query: """ - GET SUBGRAPH 1 STEPS FROM "Tom" + GET SUBGRAPH 1 STEPS FROM "Tom" YIELD vertices as nodes, edges as relationships """ Then the result should be, in any order, with relax comparison: - | _vertices | _edges | - | [("Tom")] | [] | - | [] | [] | + | nodes | relationships | + | [("Tom")] | [] | + | [] | [] | From b7c690114016ce855df000abcc376f8903b30b12 Mon Sep 17 00:00:00 2001 From: "kyle.cao" Date: Wed, 24 Nov 2021 09:50:41 +0800 Subject: [PATCH 42/53] optimize rewrite visitor (#3053) Co-authored-by: cpw <13495049+CPWstatic@users.noreply.github.com> --- 
src/graph/validator/GroupByValidator.cpp | 2 +- src/graph/validator/MatchValidator.cpp | 4 ++-- src/graph/visitor/RewriteVisitor.cpp | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/graph/validator/GroupByValidator.cpp b/src/graph/validator/GroupByValidator.cpp index 54c6818fd58..1c12f217450 100644 --- a/src/graph/validator/GroupByValidator.cpp +++ b/src/graph/validator/GroupByValidator.cpp @@ -49,7 +49,7 @@ Status GroupByValidator::validateYield(const YieldClause* yieldClause) { needGenProject_ = true; } if (!aggs.empty()) { - auto* colRewrited = ExpressionUtils::rewriteAgg2VarProp(colExpr); + auto* colRewrited = ExpressionUtils::rewriteAgg2VarProp(colExpr->clone()); projCols_->addColumn(new YieldColumn(colRewrited, colOldName)); continue; } diff --git a/src/graph/validator/MatchValidator.cpp b/src/graph/validator/MatchValidator.cpp index d2e6f009d72..0327cd3411e 100644 --- a/src/graph/validator/MatchValidator.cpp +++ b/src/graph/validator/MatchValidator.cpp @@ -732,8 +732,8 @@ Status MatchValidator::validateGroup(YieldClauseContext &yieldCtx) const { yieldCtx.aggOutputColumnNames_.emplace_back(agg->toString()); } if (!aggs.empty()) { - auto *rewrittenExpr = ExpressionUtils::rewriteAgg2VarProp(colExpr); - yieldCtx.projCols_->addColumn(new YieldColumn(rewrittenExpr, colOldName)); + auto *rewritedExpr = ExpressionUtils::rewriteAgg2VarProp(colExpr->clone()); + yieldCtx.projCols_->addColumn(new YieldColumn(rewritedExpr, colOldName)); yieldCtx.projOutputColumnNames_.emplace_back(colOldName); continue; } diff --git a/src/graph/visitor/RewriteVisitor.cpp b/src/graph/visitor/RewriteVisitor.cpp index b5d3b9a3822..3da631f210b 100644 --- a/src/graph/visitor/RewriteVisitor.cpp +++ b/src/graph/visitor/RewriteVisitor.cpp @@ -324,9 +324,9 @@ Expression *RewriteVisitor::transform(const Expression *expr, Matcher matcher, R return rewriter(expr); } else { RewriteVisitor visitor(std::move(matcher), std::move(rewriter)); - auto exprCopy = 
expr->clone(); - exprCopy->accept(&visitor); - return exprCopy; + auto *e = const_cast(expr); + e->accept(&visitor); + return e; } } @@ -339,9 +339,9 @@ Expression *RewriteVisitor::transform( return rewriter(expr); } else { RewriteVisitor visitor(std::move(matcher), std::move(rewriter), std::move(needVisitedTypes)); - auto exprCopy = expr->clone(); - exprCopy->accept(&visitor); - return exprCopy; + auto *e = const_cast(expr); + e->accept(&visitor); + return e; } } } // namespace graph From 683f7732492c5587cb5bcddd9e246c2a575e3a43 Mon Sep 17 00:00:00 2001 From: liwenhui-soul <38217397+liwenhui-soul@users.noreply.github.com> Date: Wed, 24 Nov 2021 17:02:40 +0800 Subject: [PATCH 43/53] fix meta follower read remaining jobs (#3347) --- src/meta/processors/job/JobManager.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/meta/processors/job/JobManager.cpp b/src/meta/processors/job/JobManager.cpp index 2d15e262d3b..ca2c116e719 100644 --- a/src/meta/processors/job/JobManager.cpp +++ b/src/meta/processors/job/JobManager.cpp @@ -66,6 +66,10 @@ JobManager::~JobManager() { shutDown(); } nebula::cpp2::ErrorCode JobManager::handleRemainingJobs() { std::unique_ptr iter; auto retCode = kvStore_->prefix(kDefaultSpaceId, kDefaultPartId, JobUtil::jobPrefix(), &iter); + if (retCode == nebula::cpp2::ErrorCode::E_LEADER_CHANGED) { + LOG(INFO) << "Not leader, skip reading remaining jobs"; + return nebula::cpp2::ErrorCode::SUCCEEDED; + } if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { LOG(ERROR) << "Can't find jobs, error: " << apache::thrift::util::enumNameSafe(retCode); return retCode; From ccc466a58ded6cd928fbcacfd9dd92b99941e3b0 Mon Sep 17 00:00:00 2001 From: yaphet <4414314+darionyaphet@users.noreply.github.com> Date: Wed, 24 Nov 2021 20:34:25 +0800 Subject: [PATCH 44/53] Remove group relevant component (#3330) * Remove group relevant component * fix test case --- src/clients/meta/MetaClient.cpp | 96 ------ src/clients/meta/MetaClient.h | 12 - 
src/graph/executor/CMakeLists.txt | 1 - src/graph/executor/Executor.cpp | 19 -- src/graph/executor/admin/GroupExecutor.cpp | 129 -------- src/graph/executor/admin/GroupExecutor.h | 65 ---- src/graph/planner/plan/Admin.h | 142 -------- src/graph/planner/plan/PlanNode.cpp | 12 - src/graph/planner/plan/PlanNode.h | 6 - src/graph/service/PermissionCheck.cpp | 6 - src/graph/validator/MaintainValidator.cpp | 68 ---- src/graph/validator/MaintainValidator.h | 74 ----- src/graph/validator/Validator.cpp | 12 - src/interface/meta.thrift | 50 --- src/meta/CMakeLists.txt | 5 - src/meta/MetaServiceHandler.cpp | 39 --- src/meta/MetaServiceHandler.h | 14 - .../processors/zone/AddGroupProcessor.cpp | 126 -------- src/meta/processors/zone/AddGroupProcessor.h | 31 -- .../processors/zone/DropGroupProcessor.cpp | 65 ---- src/meta/processors/zone/DropGroupProcessor.h | 31 -- .../processors/zone/GetGroupProcessor.cpp | 46 --- src/meta/processors/zone/GetGroupProcessor.h | 29 -- .../processors/zone/ListGroupsProcessor.cpp | 41 --- .../processors/zone/ListGroupsProcessor.h | 29 -- .../processors/zone/UpdateGroupProcessor.cpp | 155 --------- .../processors/zone/UpdateGroupProcessor.h | 42 --- src/meta/test/GroupZoneTest.cpp | 302 ------------------ src/meta/test/MetaClientTest.cpp | 183 ----------- src/meta/test/ProcessorTest.cpp | 296 ----------------- src/parser/MaintainSentences.cpp | 30 -- src/parser/MaintainSentences.h | 94 ------ src/parser/Sentence.h | 6 - src/parser/parser.yy | 55 ---- src/parser/test/ParserTest.cpp | 35 -- tests/maintain/test_zone.py | 33 -- 36 files changed, 2379 deletions(-) delete mode 100644 src/graph/executor/admin/GroupExecutor.cpp delete mode 100644 src/graph/executor/admin/GroupExecutor.h delete mode 100644 src/meta/processors/zone/AddGroupProcessor.cpp delete mode 100644 src/meta/processors/zone/AddGroupProcessor.h delete mode 100644 src/meta/processors/zone/DropGroupProcessor.cpp delete mode 100644 src/meta/processors/zone/DropGroupProcessor.h delete mode 
100644 src/meta/processors/zone/GetGroupProcessor.cpp delete mode 100644 src/meta/processors/zone/GetGroupProcessor.h delete mode 100644 src/meta/processors/zone/ListGroupsProcessor.cpp delete mode 100644 src/meta/processors/zone/ListGroupsProcessor.h delete mode 100644 src/meta/processors/zone/UpdateGroupProcessor.cpp delete mode 100644 src/meta/processors/zone/UpdateGroupProcessor.h diff --git a/src/clients/meta/MetaClient.cpp b/src/clients/meta/MetaClient.cpp index 21880d788ae..7576b0f9649 100644 --- a/src/clients/meta/MetaClient.cpp +++ b/src/clients/meta/MetaClient.cpp @@ -3116,102 +3116,6 @@ folly::Future>> MetaClient::listZones() { return future; } -folly::Future> MetaClient::addGroup(std::string groupName, - std::vector zoneNames) { - cpp2::AddGroupReq req; - req.set_group_name(std::move(groupName)); - req.set_zone_names(std::move(zoneNames)); - - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_addGroup(request); }, - [](cpp2::ExecResp&& resp) -> bool { - return resp.get_code() == nebula::cpp2::ErrorCode::SUCCEEDED; - }, - std::move(promise)); - return future; -} - -folly::Future> MetaClient::dropGroup(std::string groupName) { - cpp2::DropGroupReq req; - req.set_group_name(std::move(groupName)); - - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_dropGroup(request); }, - [](cpp2::ExecResp&& resp) -> bool { - return resp.get_code() == nebula::cpp2::ErrorCode::SUCCEEDED; - }, - std::move(promise)); - return future; -} - -folly::Future> MetaClient::addZoneIntoGroup(std::string zoneName, - std::string groupName) { - cpp2::AddZoneIntoGroupReq req; - req.set_zone_name(zoneName); - req.set_group_name(groupName); - - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return 
client->future_addZoneIntoGroup(request); }, - [](cpp2::ExecResp&& resp) -> bool { - return resp.get_code() == nebula::cpp2::ErrorCode::SUCCEEDED; - }, - std::move(promise)); - return future; -} - -folly::Future> MetaClient::dropZoneFromGroup(std::string zoneName, - std::string groupName) { - cpp2::DropZoneFromGroupReq req; - req.set_zone_name(zoneName); - req.set_group_name(groupName); - - folly::Promise> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_dropZoneFromGroup(request); }, - [](cpp2::ExecResp&& resp) -> bool { - return resp.get_code() == nebula::cpp2::ErrorCode::SUCCEEDED; - }, - std::move(promise)); - return future; -} - -folly::Future>> MetaClient::getGroup(std::string groupName) { - cpp2::GetGroupReq req; - req.set_group_name(std::move(groupName)); - - folly::Promise>> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_getGroup(request); }, - [](cpp2::GetGroupResp&& resp) -> decltype(auto) { return resp.get_zone_names(); }, - std::move(promise)); - return future; -} - -folly::Future>> MetaClient::listGroups() { - cpp2::ListGroupsReq req; - folly::Promise>> promise; - auto future = promise.getFuture(); - getResponse( - std::move(req), - [](auto client, auto request) { return client->future_listGroups(request); }, - [](cpp2::ListGroupsResp&& resp) -> decltype(auto) { return resp.get_groups(); }, - std::move(promise)); - return future; -} - folly::Future> MetaClient::getStats(GraphSpaceID spaceId) { cpp2::GetStatsReq req; req.set_space_id(spaceId); diff --git a/src/clients/meta/MetaClient.h b/src/clients/meta/MetaClient.h index 4e19f2e79ee..0bb794c6178 100644 --- a/src/clients/meta/MetaClient.h +++ b/src/clients/meta/MetaClient.h @@ -607,18 +607,6 @@ class MetaClient { folly::Future>> listZones(); - folly::Future> addGroup(std::string groupName, std::vector zoneNames); - - 
folly::Future> dropGroup(std::string groupName); - - folly::Future> addZoneIntoGroup(std::string zoneName, std::string groupName); - - folly::Future> dropZoneFromGroup(std::string zoneName, std::string groupName); - - folly::Future>> getGroup(std::string groupName); - - folly::Future>> listGroups(); - Status refreshCache(); folly::Future> getStats(GraphSpaceID spaceId); diff --git a/src/graph/executor/CMakeLists.txt b/src/graph/executor/CMakeLists.txt index 261f9128113..b7270c16ff9 100644 --- a/src/graph/executor/CMakeLists.txt +++ b/src/graph/executor/CMakeLists.txt @@ -64,7 +64,6 @@ nebula_add_library( admin/DownloadExecutor.cpp admin/IngestExecutor.cpp admin/ConfigExecutor.cpp - admin/GroupExecutor.cpp admin/ZoneExecutor.cpp admin/ShowTSClientsExecutor.cpp admin/SignInTSServiceExecutor.cpp diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index 93115ba4fc8..27a6985e620 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -23,7 +23,6 @@ #include "graph/executor/admin/DownloadExecutor.h" #include "graph/executor/admin/DropUserExecutor.h" #include "graph/executor/admin/GrantRoleExecutor.h" -#include "graph/executor/admin/GroupExecutor.h" #include "graph/executor/admin/IngestExecutor.h" #include "graph/executor/admin/KillQueryExecutor.h" #include "graph/executor/admin/ListRolesExecutor.h" @@ -431,24 +430,6 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kSubgraph: { return pool->add(new SubgraphExecutor(node, qctx)); } - case PlanNode::Kind::kAddGroup: { - return pool->add(new AddGroupExecutor(node, qctx)); - } - case PlanNode::Kind::kDropGroup: { - return pool->add(new DropGroupExecutor(node, qctx)); - } - case PlanNode::Kind::kDescribeGroup: { - return pool->add(new DescribeGroupExecutor(node, qctx)); - } - case PlanNode::Kind::kAddZoneIntoGroup: { - return pool->add(new AddZoneIntoGroupExecutor(node, qctx)); - } - case 
PlanNode::Kind::kDropZoneFromGroup: { - return pool->add(new DropZoneFromGroupExecutor(node, qctx)); - } - case PlanNode::Kind::kShowGroups: { - return pool->add(new ListGroupsExecutor(node, qctx)); - } case PlanNode::Kind::kAddZone: { return pool->add(new AddZoneExecutor(node, qctx)); } diff --git a/src/graph/executor/admin/GroupExecutor.cpp b/src/graph/executor/admin/GroupExecutor.cpp deleted file mode 100644 index 462690f8f27..00000000000 --- a/src/graph/executor/admin/GroupExecutor.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "graph/executor/admin/GroupExecutor.h" - -#include "common/time/ScopedTimer.h" -#include "graph/planner/plan/Admin.h" - -namespace nebula { -namespace graph { - -folly::Future AddGroupExecutor::execute() { - SCOPED_TIMER(&execTime_); - auto *agNode = asNode(node()); - return qctx() - ->getMetaClient() - ->addGroup(agNode->groupName(), agNode->zoneNames()) - .via(runner()) - .thenValue([](StatusOr resp) { - if (!resp.ok()) { - LOG(ERROR) << "Add Group Failed: " << resp.status(); - return resp.status(); - } - return Status::OK(); - }); -} - -folly::Future DropGroupExecutor::execute() { - SCOPED_TIMER(&execTime_); - auto *dgNode = asNode(node()); - return qctx() - ->getMetaClient() - ->dropGroup(dgNode->groupName()) - .via(runner()) - .thenValue([](StatusOr resp) { - if (!resp.ok()) { - LOG(ERROR) << "Drop Group Failed: " << resp.status(); - return resp.status(); - } - return Status::OK(); - }); -} - -folly::Future DescribeGroupExecutor::execute() { - SCOPED_TIMER(&execTime_); - auto *dgNode = asNode(node()); - return qctx() - ->getMetaClient() - ->getGroup(dgNode->groupName()) - .via(runner()) - .thenValue([this](StatusOr> resp) { - if (!resp.ok()) { - LOG(ERROR) << "Describe Group Failed: " << resp.status(); - return resp.status(); - } - - auto zones = std::move(resp).value(); - DataSet dataSet({"Zone"}); - for 
(auto &zone : zones) { - Row row({zone}); - dataSet.rows.emplace_back(std::move(row)); - } - return finish(ResultBuilder() - .value(Value(std::move(dataSet))) - .iter(Iterator::Kind::kDefault) - .build()); - }); -} - -folly::Future AddZoneIntoGroupExecutor::execute() { - SCOPED_TIMER(&execTime_); - auto *azNode = asNode(node()); - return qctx() - ->getMetaClient() - ->addZoneIntoGroup(azNode->zoneName(), azNode->groupName()) - .via(runner()) - .thenValue([](StatusOr resp) { - if (!resp.ok()) { - LOG(ERROR) << "Add Zone Into Group Failed: " << resp.status(); - return resp.status(); - } - return Status::OK(); - }); -} - -folly::Future DropZoneFromGroupExecutor::execute() { - SCOPED_TIMER(&execTime_); - auto *dzNode = asNode(node()); - return qctx() - ->getMetaClient() - ->dropZoneFromGroup(dzNode->zoneName(), dzNode->groupName()) - .via(runner()) - .thenValue([](StatusOr resp) { - if (!resp.ok()) { - LOG(ERROR) << "Drop Zone From Group Failed: " << resp.status(); - return resp.status(); - } - return Status::OK(); - }); -} - -folly::Future ListGroupsExecutor::execute() { - SCOPED_TIMER(&execTime_); - return qctx()->getMetaClient()->listGroups().via(runner()).thenValue( - [this](StatusOr> resp) { - if (!resp.ok()) { - LOG(ERROR) << "List Groups Failed: " << resp.status(); - return resp.status(); - } - - auto groups = std::move(resp).value(); - DataSet dataSet({"Name", "Zone"}); - for (auto &group : groups) { - for (auto &zone : group.get_zone_names()) { - Row row({*group.group_name_ref(), zone}); - dataSet.rows.emplace_back(std::move(row)); - } - } - return finish(ResultBuilder() - .value(Value(std::move(dataSet))) - .iter(Iterator::Kind::kDefault) - .build()); - }); -} - -} // namespace graph -} // namespace nebula diff --git a/src/graph/executor/admin/GroupExecutor.h b/src/graph/executor/admin/GroupExecutor.h deleted file mode 100644 index 0cb38a3fd7f..00000000000 --- a/src/graph/executor/admin/GroupExecutor.h +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2020 
vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef GRAPH_EXECUTOR_ADMIN_GROUPEXECUTOR_H_ -#define GRAPH_EXECUTOR_ADMIN_GROUPEXECUTOR_H_ - -#include "graph/executor/Executor.h" - -namespace nebula { -namespace graph { - -class AddGroupExecutor final : public Executor { - public: - AddGroupExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("AddGroupExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -class DropGroupExecutor final : public Executor { - public: - DropGroupExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("DropGroupExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -class DescribeGroupExecutor final : public Executor { - public: - DescribeGroupExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("DescribeGroupExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -class AddZoneIntoGroupExecutor final : public Executor { - public: - AddZoneIntoGroupExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("AddZoneIntoGroupExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -class DropZoneFromGroupExecutor final : public Executor { - public: - DropZoneFromGroupExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("DropZoneFromGroupExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -class ListGroupsExecutor final : public Executor { - public: - ListGroupsExecutor(const PlanNode *node, QueryContext *qctx) - : Executor("ListGroupsExecutor", node, qctx) {} - - folly::Future execute() override; -}; - -} // namespace graph -} // namespace nebula - -#endif // GRAPH_EXECUTOR_ADMIN_GROUPEXECUTOR_H_ diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h index a49ea464c53..998fdea1fea 100644 --- a/src/graph/planner/plan/Admin.h +++ b/src/graph/planner/plan/Admin.h @@ -755,82 +755,6 @@ class ShowCollation final : public SingleDependencyNode { : 
SingleDependencyNode(qctx, Kind::kShowCollation, input) {} }; -class AddGroup final : public SingleDependencyNode { - public: - static AddGroup* make(QueryContext* qctx, - PlanNode* input, - std::string groupName, - std::vector zoneNames) { - return qctx->objPool()->add( - new AddGroup(qctx, input, std::move(groupName), std::move(zoneNames))); - } - - const std::string& groupName() const { return groupName_; } - - const std::vector& zoneNames() const { return zoneNames_; } - - private: - AddGroup(QueryContext* qctx, - PlanNode* input, - std::string groupName, - std::vector zoneNames) - : SingleDependencyNode(qctx, Kind::kAddGroup, input) { - groupName_ = std::move(groupName); - zoneNames_ = std::move(zoneNames); - } - - private: - std::string groupName_; - std::vector zoneNames_; -}; - -class DropGroup final : public SingleDependencyNode { - public: - static DropGroup* make(QueryContext* qctx, PlanNode* input, std::string groupName) { - return qctx->objPool()->add(new DropGroup(qctx, input, std::move(groupName))); - } - - const std::string& groupName() const { return groupName_; } - - private: - DropGroup(QueryContext* qctx, PlanNode* input, std::string groupName) - : SingleDependencyNode(qctx, Kind::kDropGroup, input) { - groupName_ = std::move(groupName); - } - - private: - std::string groupName_; -}; - -class DescribeGroup final : public SingleDependencyNode { - public: - static DescribeGroup* make(QueryContext* qctx, PlanNode* input, std::string groupName) { - return qctx->objPool()->add(new DescribeGroup(qctx, input, std::move(groupName))); - } - - const std::string& groupName() const { return groupName_; } - - private: - DescribeGroup(QueryContext* qctx, PlanNode* input, std::string groupName) - : SingleDependencyNode(qctx, Kind::kDescribeGroup, input) { - groupName_ = std::move(groupName); - } - - private: - std::string groupName_; -}; - -class ListGroups final : public SingleDependencyNode { - public: - static ListGroups* make(QueryContext* qctx, PlanNode* 
input) { - return qctx->objPool()->add(new ListGroups(qctx, input)); - } - - private: - ListGroups(QueryContext* qctx, PlanNode* input) - : SingleDependencyNode(qctx, Kind::kShowGroups, input) {} -}; - class AddHostIntoZone final : public SingleDependencyNode { public: static AddHostIntoZone* make(QueryContext* qctx, @@ -981,72 +905,6 @@ class ListZones final : public SingleDependencyNode { : SingleDependencyNode(qctx, Kind::kShowZones, input) {} }; -class AddZoneIntoGroup final : public SingleDependencyNode { - public: - static AddZoneIntoGroup* make(QueryContext* qctx, - PlanNode* input, - std::string groupName, - std::string zoneName) { - return qctx->objPool()->add( - new AddZoneIntoGroup(qctx, input, std::move(zoneName), std::move(groupName))); - } - - const std::string& zoneName() const { return zoneName_; } - - const std::string& groupName() const { return groupName_; } - - private: - AddZoneIntoGroup(QueryContext* qctx, PlanNode* input, std::string zoneName, std::string groupName) - : SingleDependencyNode(qctx, Kind::kAddZoneIntoGroup, input) { - zoneName_ = std::move(zoneName); - groupName_ = std::move(groupName); - } - - private: - std::string zoneName_; - std::string groupName_; -}; - -class DropZoneFromGroup final : public SingleDependencyNode { - public: - static DropZoneFromGroup* make(QueryContext* qctx, - PlanNode* input, - std::string groupName, - std::string zoneName) { - return qctx->objPool()->add( - new DropZoneFromGroup(qctx, input, std::move(zoneName), std::move(groupName))); - } - - const std::string& zoneName() const { return zoneName_; } - - const std::string& groupName() const { return groupName_; } - - private: - DropZoneFromGroup(QueryContext* qctx, - PlanNode* input, - std::string zoneName, - std::string groupName) - : SingleDependencyNode(qctx, Kind::kDropZoneFromGroup, input) { - zoneName_ = std::move(zoneName); - groupName_ = std::move(groupName); - } - - private: - std::string zoneName_; - std::string groupName_; -}; - -class 
ShowGroups final : public SingleDependencyNode { - public: - static ShowGroups* make(QueryContext* qctx, PlanNode* input) { - return qctx->objPool()->add(new ShowGroups(qctx, input)); - } - - private: - ShowGroups(QueryContext* qctx, PlanNode* input) - : SingleDependencyNode(qctx, Kind::kShowGroups, input) {} -}; - class ShowZones final : public SingleDependencyNode { public: static ShowZones* make(QueryContext* qctx, PlanNode* input) { diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index f9d1bcfc4a2..388c08afab5 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -231,28 +231,16 @@ const char* PlanNode::toString(PlanNode::Kind kind) { case Kind::kSubgraph: return "Subgraph"; // Group and Zone - case Kind::kAddGroup: - return "AddGroup"; - case Kind::kDropGroup: - return "DropGroup"; case Kind::kAddZone: return "AddZone"; case Kind::kDropZone: return "DropZone"; - case Kind::kDescribeGroup: - return "DescribeGroup"; - case Kind::kAddZoneIntoGroup: - return "AddZoneIntoGroup"; - case Kind::kDropZoneFromGroup: - return "DropZoneFromGroup"; case Kind::kDescribeZone: return "DescribeZone"; case Kind::kAddHostIntoZone: return "AddHostIntoZone"; case Kind::kDropHostFromZone: return "DropHostFromZone"; - case Kind::kShowGroups: - return "ShowGroups"; case Kind::kShowZones: return "ShowZones"; case Kind::kAddListener: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index ed50f0a9c51..a5fdc3b5918 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -145,13 +145,7 @@ class PlanNode { kShowMetaLeader, // zone related - kShowGroups, kShowZones, - kAddGroup, - kDropGroup, - kDescribeGroup, - kAddZoneIntoGroup, - kDropZoneFromGroup, kAddZone, kDropZone, kDescribeZone, diff --git a/src/graph/service/PermissionCheck.cpp b/src/graph/service/PermissionCheck.cpp index 626d7ec4a04..c0752634f3e 100644 --- 
a/src/graph/service/PermissionCheck.cpp +++ b/src/graph/service/PermissionCheck.cpp @@ -55,12 +55,6 @@ Status PermissionCheck::permissionCheck(ClientSession *session, case Sentence::Kind::kDropSpace: case Sentence::Kind::kCreateSnapshot: case Sentence::Kind::kDropSnapshot: - case Sentence::Kind::kAddGroup: - case Sentence::Kind::kDropGroup: - case Sentence::Kind::kDescribeGroup: - case Sentence::Kind::kListGroups: - case Sentence::Kind::kAddZoneIntoGroup: - case Sentence::Kind::kDropZoneFromGroup: case Sentence::Kind::kAddZone: case Sentence::Kind::kDropZone: case Sentence::Kind::kDescribeZone: diff --git a/src/graph/validator/MaintainValidator.cpp b/src/graph/validator/MaintainValidator.cpp index 73483e092ac..609479c1348 100644 --- a/src/graph/validator/MaintainValidator.cpp +++ b/src/graph/validator/MaintainValidator.cpp @@ -460,74 +460,6 @@ Status ShowEdgeIndexStatusValidator::toPlan() { return Status::OK(); } -Status AddGroupValidator::validateImpl() { - auto sentence = static_cast(sentence_); - if (*sentence->groupName() == "default") { - return Status::SemanticError("Group default conflict"); - } - return Status::OK(); -} - -Status AddGroupValidator::toPlan() { - auto sentence = static_cast(sentence_); - auto *doNode = - AddGroup::make(qctx_, nullptr, *sentence->groupName(), sentence->zoneNames()->zoneNames()); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - -Status DropGroupValidator::validateImpl() { return Status::OK(); } - -Status DropGroupValidator::toPlan() { - auto sentence = static_cast(sentence_); - auto *doNode = DropGroup::make(qctx_, nullptr, *sentence->groupName()); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - -Status DescribeGroupValidator::validateImpl() { return Status::OK(); } - -Status DescribeGroupValidator::toPlan() { - auto sentence = static_cast(sentence_); - auto *doNode = DescribeGroup::make(qctx_, nullptr, *sentence->groupName()); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - -Status 
ListGroupsValidator::validateImpl() { return Status::OK(); } - -Status ListGroupsValidator::toPlan() { - auto *doNode = ListGroups::make(qctx_, nullptr); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - -Status AddZoneIntoGroupValidator::validateImpl() { return Status::OK(); } - -Status AddZoneIntoGroupValidator::toPlan() { - auto sentence = static_cast(sentence_); - auto *doNode = - AddZoneIntoGroup::make(qctx_, nullptr, *sentence->groupName(), *sentence->zoneName()); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - -Status DropZoneFromGroupValidator::validateImpl() { return Status::OK(); } - -Status DropZoneFromGroupValidator::toPlan() { - auto sentence = static_cast(sentence_); - auto *doNode = - DropZoneFromGroup::make(qctx_, nullptr, *sentence->groupName(), *sentence->zoneName()); - root_ = doNode; - tail_ = root_; - return Status::OK(); -} - Status AddZoneValidator::validateImpl() { return Status::OK(); } Status AddZoneValidator::toPlan() { diff --git a/src/graph/validator/MaintainValidator.h b/src/graph/validator/MaintainValidator.h index 095f4266d4a..865994424ad 100644 --- a/src/graph/validator/MaintainValidator.h +++ b/src/graph/validator/MaintainValidator.h @@ -310,80 +310,6 @@ class ShowEdgeIndexStatusValidator final : public Validator { Status toPlan() override; }; -class AddGroupValidator final : public Validator { - public: - AddGroupValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - -class DropGroupValidator final : public Validator { - public: - DropGroupValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - -class DescribeGroupValidator final : public Validator { - public: - DescribeGroupValidator(Sentence* sentence, QueryContext* 
context) : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - -class ListGroupsValidator final : public Validator { - public: - ListGroupsValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - -class AddZoneIntoGroupValidator final : public Validator { - public: - AddZoneIntoGroupValidator(Sentence* sentence, QueryContext* context) - : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - -class DropZoneFromGroupValidator final : public Validator { - public: - DropZoneFromGroupValidator(Sentence* sentence, QueryContext* context) - : Validator(sentence, context) { - setNoSpaceRequired(); - } - - private: - Status validateImpl() override; - - Status toPlan() override; -}; - class AddZoneValidator final : public Validator { public: AddZoneValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { diff --git a/src/graph/validator/Validator.cpp b/src/graph/validator/Validator.cpp index 6c2aba0cfb2..f7e2cb79816 100644 --- a/src/graph/validator/Validator.cpp +++ b/src/graph/validator/Validator.cpp @@ -200,18 +200,6 @@ std::unique_ptr Validator::makeValidator(Sentence* sentence, QueryCon return std::make_unique(sentence, context); case Sentence::Kind::kLookup: return std::make_unique(sentence, context); - case Sentence::Kind::kAddGroup: - return std::make_unique(sentence, context); - case Sentence::Kind::kDropGroup: - return std::make_unique(sentence, context); - case Sentence::Kind::kDescribeGroup: - return std::make_unique(sentence, context); - case Sentence::Kind::kListGroups: - return std::make_unique(sentence, context); - case Sentence::Kind::kAddZoneIntoGroup: - return std::make_unique(sentence, context); - case 
Sentence::Kind::kDropZoneFromGroup: - return std::make_unique(sentence, context); case Sentence::Kind::kAddZone: return std::make_unique(sentence, context); case Sentence::Kind::kDropZone: diff --git a/src/interface/meta.thrift b/src/interface/meta.thrift index 166d738623c..50afb2462fc 100644 --- a/src/interface/meta.thrift +++ b/src/interface/meta.thrift @@ -853,49 +853,6 @@ struct ListZonesResp { 3: list zones, } -struct AddGroupReq { - 1: binary group_name, - 2: list zone_names, -} - -struct DropGroupReq { - 1: binary group_name, -} - -struct AddZoneIntoGroupReq { - 1: binary zone_name, - 2: binary group_name, -} - -struct DropZoneFromGroupReq { - 1: binary zone_name, - 2: binary group_name, -} - -struct GetGroupReq { - 1: binary group_name, -} - -struct GetGroupResp { - 1: common.ErrorCode code, - 2: common.HostAddr leader, - 3: list zone_names, -} - -struct ListGroupsReq { -} - -struct Group { - 1: binary group_name, - 2: list zone_names, -} - -struct ListGroupsResp { - 1: common.ErrorCode code, - 2: common.HostAddr leader, - 3: list groups, -} - enum ListenerType { UNKNOWN = 0x00, ELASTICSEARCH = 0x01, @@ -1226,13 +1183,6 @@ service MetaService { GetZoneResp getZone(1: GetZoneReq req); ListZonesResp listZones(1: ListZonesReq req); - ExecResp addGroup(1: AddGroupReq req); - ExecResp dropGroup(1: DropGroupReq req); - ExecResp addZoneIntoGroup(1: AddZoneIntoGroupReq req); - ExecResp dropZoneFromGroup(1: DropZoneFromGroupReq req); - GetGroupResp getGroup(1: GetGroupReq req); - ListGroupsResp listGroups(1: ListGroupsReq req); - CreateBackupResp createBackup(1: CreateBackupReq req); ExecResp restoreMeta(1: RestoreMetaReq req); ExecResp addListener(1: AddListenerReq req); diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index f5ecbe7bcd8..a02e1bd3073 100644 --- a/src/meta/CMakeLists.txt +++ b/src/meta/CMakeLists.txt @@ -85,11 +85,6 @@ nebula_add_library( processors/zone/GetZoneProcessor.cpp processors/zone/ListZonesProcessor.cpp 
processors/zone/UpdateZoneProcessor.cpp - processors/zone/AddGroupProcessor.cpp - processors/zone/DropGroupProcessor.cpp - processors/zone/GetGroupProcessor.cpp - processors/zone/ListGroupsProcessor.cpp - processors/zone/UpdateGroupProcessor.cpp processors/listener/ListenerProcessor.cpp processors/session/SessionManagerProcessor.cpp ) diff --git a/src/meta/MetaServiceHandler.cpp b/src/meta/MetaServiceHandler.cpp index 4e58544af0b..58a26b8c038 100644 --- a/src/meta/MetaServiceHandler.cpp +++ b/src/meta/MetaServiceHandler.cpp @@ -61,15 +61,10 @@ #include "meta/processors/schema/ListTagsProcessor.h" #include "meta/processors/session/SessionManagerProcessor.h" #include "meta/processors/user/AuthenticationProcessor.h" -#include "meta/processors/zone/AddGroupProcessor.h" #include "meta/processors/zone/AddZoneProcessor.h" -#include "meta/processors/zone/DropGroupProcessor.h" #include "meta/processors/zone/DropZoneProcessor.h" -#include "meta/processors/zone/GetGroupProcessor.h" #include "meta/processors/zone/GetZoneProcessor.h" -#include "meta/processors/zone/ListGroupsProcessor.h" #include "meta/processors/zone/ListZonesProcessor.h" -#include "meta/processors/zone/UpdateGroupProcessor.h" #include "meta/processors/zone/UpdateZoneProcessor.h" #define RETURN_FUTURE(processor) \ @@ -455,40 +450,6 @@ folly::Future MetaServiceHandler::future_dropHostFromZone( RETURN_FUTURE(processor); } -folly::Future MetaServiceHandler::future_addGroup(const cpp2::AddGroupReq& req) { - auto* processor = AddGroupProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_dropGroup(const cpp2::DropGroupReq& req) { - auto* processor = DropGroupProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_getGroup( - const cpp2::GetGroupReq& req) { - auto* processor = GetGroupProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_listGroups( - const 
cpp2::ListGroupsReq& req) { - auto* processor = ListGroupsProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_addZoneIntoGroup( - const cpp2::AddZoneIntoGroupReq& req) { - auto* processor = AddZoneIntoGroupProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - -folly::Future MetaServiceHandler::future_dropZoneFromGroup( - const cpp2::DropZoneFromGroupReq& req) { - auto* processor = DropZoneFromGroupProcessor::instance(kvstore_); - RETURN_FUTURE(processor); -} - folly::Future MetaServiceHandler::future_addListener( const cpp2::AddListenerReq& req) { auto* processor = AddListenerProcessor::instance(kvstore_); diff --git a/src/meta/MetaServiceHandler.h b/src/meta/MetaServiceHandler.h index 2ea463e7c89..1a66097c08c 100644 --- a/src/meta/MetaServiceHandler.h +++ b/src/meta/MetaServiceHandler.h @@ -190,20 +190,6 @@ class MetaServiceHandler final : public cpp2::MetaServiceSvIf { folly::Future future_dropHostFromZone( const cpp2::DropHostFromZoneReq& req) override; - folly::Future future_addGroup(const cpp2::AddGroupReq& req) override; - - folly::Future future_dropGroup(const cpp2::DropGroupReq& req) override; - - folly::Future future_getGroup(const cpp2::GetGroupReq& req) override; - - folly::Future future_listGroups(const cpp2::ListGroupsReq& req) override; - - folly::Future future_addZoneIntoGroup( - const cpp2::AddZoneIntoGroupReq& req) override; - - folly::Future future_dropZoneFromGroup( - const cpp2::DropZoneFromGroupReq& req) override; - // listener folly::Future future_addListener(const cpp2::AddListenerReq& req) override; diff --git a/src/meta/processors/zone/AddGroupProcessor.cpp b/src/meta/processors/zone/AddGroupProcessor.cpp deleted file mode 100644 index 316ca7a3a37..00000000000 --- a/src/meta/processors/zone/AddGroupProcessor.cpp +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "meta/processors/zone/AddGroupProcessor.h" - -namespace nebula { -namespace meta { - -void AddGroupProcessor::process(const cpp2::AddGroupReq& req) { - folly::SharedMutex::WriteHolder wHolder(LockUtils::groupLock()); - auto groupName = req.get_group_name(); - auto zoneNames = req.get_zone_names(); - if (zoneNames.empty()) { - LOG(ERROR) << "The zone names should not be empty."; - handleErrorCode(nebula::cpp2::ErrorCode::E_INVALID_PARM); - onFinished(); - return; - } - - std::set zoneSet(zoneNames.begin(), zoneNames.end()); - if (zoneNames.size() != zoneSet.size()) { - LOG(ERROR) << "Conflict zone found in the group."; - handleErrorCode(nebula::cpp2::ErrorCode::E_CONFLICT); - onFinished(); - return; - } - - // check the zone existed - const auto& prefix = MetaKeyUtils::zonePrefix(); - auto iterRet = doPrefix(prefix); - if (!nebula::ok(iterRet)) { - auto retCode = nebula::error(iterRet); - LOG(ERROR) << "Get zones failed: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto iter = nebula::value(iterRet).get(); - std::vector zones; - while (iter->valid()) { - auto zoneName = MetaKeyUtils::parseZoneName(iter->key()); - zones.emplace_back(std::move(zoneName)); - iter->next(); - } - - for (auto name = zoneNames.begin(); name != zoneNames.end(); name++) { - if (std::find(zones.begin(), zones.end(), *name) == zones.end()) { - LOG(ERROR) << "Zone: " << *name << " not existed"; - handleErrorCode(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND); - onFinished(); - return; - } - } - - auto retCode = checkGroupRedundancy(zoneNames); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(retCode); - onFinished(); - return; - } - - auto groupRet = getGroupId(groupName); - if (nebula::ok(groupRet)) { - LOG(ERROR) << "Group " << groupName << " already existed"; - handleErrorCode(nebula::cpp2::ErrorCode::E_EXISTED); - onFinished(); - return; - } else { - retCode = nebula::error(groupRet); - 
if (retCode != nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND) { - LOG(ERROR) << "Create Group failed, group name " << groupName - << " error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - } - - auto groupIdRet = autoIncrementId(); - if (!nebula::ok(groupIdRet)) { - LOG(ERROR) << "Create Group failed"; - handleErrorCode(nebula::error(groupIdRet)); - onFinished(); - return; - } - - std::vector data; - auto groupId = nebula::value(groupIdRet); - data.emplace_back(MetaKeyUtils::indexGroupKey(groupName), - std::string(reinterpret_cast(&groupId), sizeof(GroupID))); - data.emplace_back(MetaKeyUtils::groupKey(groupName), MetaKeyUtils::groupVal(zoneNames)); - - LOG(INFO) << "Create Group: " << groupName; - doSyncPutAndUpdate(std::move(data)); -} - -nebula::cpp2::ErrorCode AddGroupProcessor::checkGroupRedundancy(std::vector zones) { - const auto& prefix = MetaKeyUtils::groupPrefix(); - auto iterRet = doPrefix(prefix); - if (!nebula::ok(iterRet)) { - auto retCode = nebula::error(iterRet); - LOG(ERROR) << "Get groups failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - auto iter = nebula::value(iterRet).get(); - - std::sort(zones.begin(), zones.end()); - while (iter->valid()) { - auto groupName = MetaKeyUtils::parseGroupName(iter->key()); - auto zoneNames = MetaKeyUtils::parseZoneNames(iter->val()); - std::sort(zoneNames.begin(), zoneNames.end()); - if (zones == zoneNames) { - LOG(ERROR) << "Group " << groupName - << " have created, although the zones order maybe not the same"; - return nebula::cpp2::ErrorCode::E_EXISTED; - } - iter->next(); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/zone/AddGroupProcessor.h b/src/meta/processors/zone/AddGroupProcessor.h deleted file mode 100644 index 868331f8f5c..00000000000 --- a/src/meta/processors/zone/AddGroupProcessor.h +++ /dev/null @@ -1,31 +0,0 @@ -/* 
Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef META_ADDGROUPPROCESSOR_H -#define META_ADDGROUPPROCESSOR_H - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class AddGroupProcessor : public BaseProcessor { - public: - static AddGroupProcessor* instance(kvstore::KVStore* kvstore) { - return new AddGroupProcessor(kvstore); - } - - void process(const cpp2::AddGroupReq& req); - - private: - explicit AddGroupProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - - nebula::cpp2::ErrorCode checkGroupRedundancy(std::vector zones); -}; - -} // namespace meta -} // namespace nebula - -#endif // META_ADDGROUPPROCESSOR_H diff --git a/src/meta/processors/zone/DropGroupProcessor.cpp b/src/meta/processors/zone/DropGroupProcessor.cpp deleted file mode 100644 index 50f40385fa4..00000000000 --- a/src/meta/processors/zone/DropGroupProcessor.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "meta/processors/zone/DropGroupProcessor.h" - -namespace nebula { -namespace meta { - -void DropGroupProcessor::process(const cpp2::DropGroupReq& req) { - folly::SharedMutex::WriteHolder wHolder(LockUtils::groupLock()); - auto groupName = req.get_group_name(); - auto groupIdRet = getGroupId(groupName); - if (!nebula::ok(groupIdRet)) { - auto retCode = nebula::error(groupIdRet); - if (retCode == nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND) { - LOG(ERROR) << "Drop Group Failed, Group " << groupName << " not found."; - } else { - LOG(ERROR) << "Drop Group Failed, error: " << apache::thrift::util::enumNameSafe(retCode); - } - handleErrorCode(retCode); - onFinished(); - return; - } - - // If any space rely on this group, it should not be dropped. 
- auto retCode = checkSpaceDependency(groupName); - if (retCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(retCode); - onFinished(); - return; - } - - std::vector keys; - keys.emplace_back(MetaKeyUtils::indexGroupKey(groupName)); - keys.emplace_back(MetaKeyUtils::groupKey(groupName)); - LOG(INFO) << "Drop Group: " << groupName; - doSyncMultiRemoveAndUpdate(std::move(keys)); -} - -nebula::cpp2::ErrorCode DropGroupProcessor::checkSpaceDependency(const std::string& groupName) { - const auto& prefix = MetaKeyUtils::spacePrefix(); - auto iterRet = doPrefix(prefix); - if (!nebula::ok(iterRet)) { - auto retCode = nebula::error(iterRet); - LOG(ERROR) << "List spaces failed, error: " << apache::thrift::util::enumNameSafe(retCode); - return retCode; - } - auto iter = nebula::value(iterRet).get(); - - while (iter->valid()) { - auto properties = MetaKeyUtils::parseSpace(iter->val()); - if (properties.group_name_ref().has_value() && *properties.group_name_ref() == groupName) { - LOG(ERROR) << "Space " << properties.get_space_name() << " is bind to the group " - << groupName; - return nebula::cpp2::ErrorCode::E_NOT_DROP; - } - iter->next(); - } - return nebula::cpp2::ErrorCode::SUCCEEDED; -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/zone/DropGroupProcessor.h b/src/meta/processors/zone/DropGroupProcessor.h deleted file mode 100644 index 975aee33b61..00000000000 --- a/src/meta/processors/zone/DropGroupProcessor.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_DROPGROUPPROCESSOR_H -#define META_DROPGROUPPROCESSOR_H - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class DropGroupProcessor : public BaseProcessor { - public: - static DropGroupProcessor* instance(kvstore::KVStore* kvstore) { - return new DropGroupProcessor(kvstore); - } - - void process(const cpp2::DropGroupReq& req); - - private: - explicit DropGroupProcessor(kvstore::KVStore* kvstore) : BaseProcessor(kvstore) {} - - nebula::cpp2::ErrorCode checkSpaceDependency(const std::string& groupName); -}; - -} // namespace meta -} // namespace nebula - -#endif // META_DROPGROUPPROCESSOR_H diff --git a/src/meta/processors/zone/GetGroupProcessor.cpp b/src/meta/processors/zone/GetGroupProcessor.cpp deleted file mode 100644 index 24fcde38317..00000000000 --- a/src/meta/processors/zone/GetGroupProcessor.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "meta/processors/zone/GetGroupProcessor.h" - -namespace nebula { -namespace meta { - -void GetGroupProcessor::process(const cpp2::GetGroupReq& req) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - auto& groupName = req.get_group_name(); - auto groupIdRet = getGroupId(groupName); - if (!nebula::ok(groupIdRet)) { - auto retCode = nebula::error(groupIdRet); - if (retCode == nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND) { - LOG(ERROR) << "Get Group Failed, Group " << groupName << " not found."; - } else { - LOG(ERROR) << "Get Group Failed, error: " << apache::thrift::util::enumNameSafe(retCode); - } - handleErrorCode(retCode); - onFinished(); - return; - } - - auto groupKey = MetaKeyUtils::groupKey(groupName); - auto groupValueRet = doGet(std::move(groupKey)); - if (!nebula::ok(groupValueRet)) { - auto retCode = nebula::error(groupValueRet); - LOG(ERROR) << "Get group " << groupName << " failed, error " - << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); - LOG(INFO) << "Get Group: " << groupName << " zone size: " << zoneNames.size(); - resp_.set_zone_names(std::move(zoneNames)); - handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); - onFinished(); -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/zone/GetGroupProcessor.h b/src/meta/processors/zone/GetGroupProcessor.h deleted file mode 100644 index 2129d1923c9..00000000000 --- a/src/meta/processors/zone/GetGroupProcessor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_GETGROUPPROCESSOR_H -#define META_GETGROUPPROCESSOR_H - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class GetGroupProcessor : public BaseProcessor { - public: - static GetGroupProcessor* instance(kvstore::KVStore* kvstore) { - return new GetGroupProcessor(kvstore); - } - - void process(const cpp2::GetGroupReq& req); - - private: - explicit GetGroupProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -} // namespace meta -} // namespace nebula -#endif // META_GETGROUPPROCESSOR_H diff --git a/src/meta/processors/zone/ListGroupsProcessor.cpp b/src/meta/processors/zone/ListGroupsProcessor.cpp deleted file mode 100644 index 17bbfad38c7..00000000000 --- a/src/meta/processors/zone/ListGroupsProcessor.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#include "meta/processors/zone/ListGroupsProcessor.h" - -namespace nebula { -namespace meta { - -void ListGroupsProcessor::process(const cpp2::ListGroupsReq&) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - const auto& prefix = MetaKeyUtils::groupPrefix(); - auto iterRet = doPrefix(prefix); - if (!nebula::ok(iterRet)) { - auto retCode = nebula::error(iterRet); - LOG(ERROR) << "List groups failed, error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - auto iter = nebula::value(iterRet).get(); - - std::vector groups; - while (iter->valid()) { - auto groupName = MetaKeyUtils::parseGroupName(iter->key()); - auto zoneNames = MetaKeyUtils::parseZoneNames(iter->val()); - cpp2::Group group; - group.set_group_name(std::move(groupName)); - group.set_zone_names(std::move(zoneNames)); - groups.emplace_back(std::move(group)); - iter->next(); - } - - handleErrorCode(nebula::cpp2::ErrorCode::SUCCEEDED); - resp_.set_groups(std::move(groups)); - onFinished(); -} - -} 
// namespace meta -} // namespace nebula diff --git a/src/meta/processors/zone/ListGroupsProcessor.h b/src/meta/processors/zone/ListGroupsProcessor.h deleted file mode 100644 index 77bf00f293d..00000000000 --- a/src/meta/processors/zone/ListGroupsProcessor.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef META_LISTGROUPSPROCESSOR_H -#define META_LISTGROUPSPROCESSOR_H - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class ListGroupsProcessor : public BaseProcessor { - public: - static ListGroupsProcessor* instance(kvstore::KVStore* kvstore) { - return new ListGroupsProcessor(kvstore); - } - - void process(const cpp2::ListGroupsReq& req); - - private: - explicit ListGroupsProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -} // namespace meta -} // namespace nebula -#endif // META_LISTGROUPSPROCESSOR_H diff --git a/src/meta/processors/zone/UpdateGroupProcessor.cpp b/src/meta/processors/zone/UpdateGroupProcessor.cpp deleted file mode 100644 index 5b3db6a6b83..00000000000 --- a/src/meta/processors/zone/UpdateGroupProcessor.cpp +++ /dev/null @@ -1,155 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "meta/processors/zone/UpdateGroupProcessor.h" - -namespace nebula { -namespace meta { - -void AddZoneIntoGroupProcessor::process(const cpp2::AddZoneIntoGroupReq& req) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - auto groupName = req.get_group_name(); - auto groupIdRet = getGroupId(groupName); - if (!nebula::ok(groupIdRet)) { - auto retCode = nebula::error(groupIdRet); - LOG(ERROR) << "Get Group failed, group " << groupName - << " error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto groupKey = MetaKeyUtils::groupKey(groupName); - auto groupValueRet = doGet(std::move(groupKey)); - if (!nebula::ok(groupValueRet)) { - auto retCode = nebula::error(groupValueRet); - if (retCode == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { - retCode = nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND; - } - LOG(ERROR) << "Get group " << groupName << " failed, error " - << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto zoneName = req.get_zone_name(); - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); - auto iter = std::find(zoneNames.begin(), zoneNames.end(), zoneName); - if (iter != zoneNames.end()) { - LOG(ERROR) << "Zone " << zoneName << " already exist in the group " << groupName; - handleErrorCode(nebula::cpp2::ErrorCode::E_EXISTED); - onFinished(); - return; - } - - const auto& zonePrefix = MetaKeyUtils::zonePrefix(); - auto iterRet = doPrefix(zonePrefix); - if (!nebula::ok(iterRet)) { - auto retCode = nebula::error(iterRet); - LOG(ERROR) << "Get zones failed, error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - auto zoneIter = nebula::value(iterRet).get(); - - bool found = false; - while (zoneIter->valid()) { - auto name = MetaKeyUtils::parseZoneName(zoneIter->key()); - if (name == zoneName) { - found = true; - 
break; - } - zoneIter->next(); - } - - if (!found) { - LOG(ERROR) << "Zone " << zoneName << " not found"; - handleErrorCode(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND); - onFinished(); - return; - } - - zoneNames.emplace_back(zoneName); - std::vector data; - data.emplace_back(std::move(groupKey), MetaKeyUtils::groupVal(zoneNames)); - LOG(INFO) << "Add Zone " << zoneName << " Into Group " << groupName; - doSyncPutAndUpdate(std::move(data)); -} - -void DropZoneFromGroupProcessor::process(const cpp2::DropZoneFromGroupReq& req) { - folly::SharedMutex::ReadHolder rHolder(LockUtils::groupLock()); - auto groupName = req.get_group_name(); - auto groupIdRet = getGroupId(groupName); - if (!nebula::ok(groupIdRet)) { - auto retCode = nebula::error(groupIdRet); - LOG(ERROR) << " Get Group " << groupName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto groupKey = MetaKeyUtils::groupKey(groupName); - auto groupValueRet = doGet(groupKey); - if (!nebula::ok(groupValueRet)) { - auto retCode = nebula::error(groupValueRet); - if (retCode == nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND) { - retCode = nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND; - } - LOG(ERROR) << "Get group " << groupName - << " failed, error: " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - auto zoneName = req.get_zone_name(); - auto zoneNames = MetaKeyUtils::parseZoneNames(std::move(nebula::value(groupValueRet))); - auto iter = std::find(zoneNames.begin(), zoneNames.end(), zoneName); - if (iter == zoneNames.end()) { - LOG(ERROR) << "Zone " << zoneName << " not exist in the group " << groupName; - handleErrorCode(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND); - onFinished(); - return; - } - - const auto& spacePrefix = MetaKeyUtils::spacePrefix(); - auto spaceRet = doPrefix(spacePrefix); - if (!nebula::ok(spaceRet)) { - auto retCode = nebula::error(spaceRet); - LOG(ERROR) << 
"List spaces failed, error " << apache::thrift::util::enumNameSafe(retCode); - handleErrorCode(retCode); - onFinished(); - return; - } - - nebula::cpp2::ErrorCode spaceCode = nebula::cpp2::ErrorCode::SUCCEEDED; - auto spaceIter = nebula::value(spaceRet).get(); - while (spaceIter->valid()) { - auto properties = MetaKeyUtils::parseSpace(spaceIter->val()); - if (properties.group_name_ref().has_value() && *properties.group_name_ref() == groupName) { - LOG(ERROR) << "Space is bind to the group " << *properties.group_name_ref(); - spaceCode = nebula::cpp2::ErrorCode::E_CONFLICT; - } - spaceIter->next(); - } - - if (spaceCode != nebula::cpp2::ErrorCode::SUCCEEDED) { - handleErrorCode(spaceCode); - onFinished(); - return; - } - - zoneNames.erase(iter); - std::vector data; - data.emplace_back(std::move(groupKey), MetaKeyUtils::groupVal(zoneNames)); - LOG(INFO) << "Drop Zone " << zoneName << " From Group " << groupName; - doSyncPutAndUpdate(std::move(data)); -} - -} // namespace meta -} // namespace nebula diff --git a/src/meta/processors/zone/UpdateGroupProcessor.h b/src/meta/processors/zone/UpdateGroupProcessor.h deleted file mode 100644 index a0367f8a898..00000000000 --- a/src/meta/processors/zone/UpdateGroupProcessor.h +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef META_UPDATEGROUPPROCESSOR_H -#define META_UPDATEGROUPPROCESSOR_H - -#include "meta/processors/BaseProcessor.h" - -namespace nebula { -namespace meta { - -class AddZoneIntoGroupProcessor : public BaseProcessor { - public: - static AddZoneIntoGroupProcessor* instance(kvstore::KVStore* kvstore) { - return new AddZoneIntoGroupProcessor(kvstore); - } - - void process(const cpp2::AddZoneIntoGroupReq& req); - - private: - explicit AddZoneIntoGroupProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -class DropZoneFromGroupProcessor : public BaseProcessor { - public: - static DropZoneFromGroupProcessor* instance(kvstore::KVStore* kvstore) { - return new DropZoneFromGroupProcessor(kvstore); - } - - void process(const cpp2::DropZoneFromGroupReq& req); - - private: - explicit DropZoneFromGroupProcessor(kvstore::KVStore* kvstore) - : BaseProcessor(kvstore) {} -}; - -} // namespace meta -} // namespace nebula -#endif // META_UPDATEGROUPPROCESSOR_H diff --git a/src/meta/test/GroupZoneTest.cpp b/src/meta/test/GroupZoneTest.cpp index 10af4a722b2..564cb1f7955 100644 --- a/src/meta/test/GroupZoneTest.cpp +++ b/src/meta/test/GroupZoneTest.cpp @@ -9,15 +9,10 @@ #include "common/fs/TempDir.h" #include "meta/processors/parts/CreateSpaceProcessor.h" #include "meta/processors/parts/DropSpaceProcessor.h" -#include "meta/processors/zone/AddGroupProcessor.h" #include "meta/processors/zone/AddZoneProcessor.h" -#include "meta/processors/zone/DropGroupProcessor.h" #include "meta/processors/zone/DropZoneProcessor.h" -#include "meta/processors/zone/GetGroupProcessor.h" #include "meta/processors/zone/GetZoneProcessor.h" -#include "meta/processors/zone/ListGroupsProcessor.h" #include "meta/processors/zone/ListZonesProcessor.h" -#include "meta/processors/zone/UpdateGroupProcessor.h" #include "meta/processors/zone/UpdateZoneProcessor.h" #include "meta/test/TestUtils.h" @@ -280,54 +275,6 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto resp = std::move(f).get(); 
ASSERT_EQ(nebula::cpp2::ErrorCode::E_KEY_NOT_FOUND, resp.get_code()); } -// Add Group -{ - cpp2::AddGroupReq req; - req.set_group_name("group_0"); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space"); - properties.set_partition_num(12); - properties.set_replica_factor(3); - properties.set_group_name("group_0"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Drop host from zone -{ - cpp2::DropHostFromZoneReq req; - req.set_zone_name("zone_0"); - HostAddr node{"12", 12}; - req.set_node(std::move(node)); - auto* processor = DropHostFromZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_CONFLICT, resp.get_code()); -} -{ - cpp2::DropSpaceReq req; - req.set_space_name("space"); - req.set_if_exists(false); - auto* processor = DropSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} { cpp2::DropHostFromZoneReq req; req.set_zone_name("zone_0"); @@ -339,255 +286,6 @@ TEST(GroupAndZoneTest, GroupAndZoneTest) { auto resp = std::move(f).get(); ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); } -// Add Group which zone not exist -{ - LOG(INFO) << "Add Group which zone not exist"; - cpp2::AddGroupReq req; - 
req.set_group_name("group_zone_not_exist"); - std::vector zones = {"zone_0", "zone_1", "zone_4"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND, resp.get_code()); -} -// Group already existed -{ - cpp2::AddGroupReq req; - req.set_group_name("group_1"); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_EXISTED, resp.get_code()); -} -// Group already existed although the order is different -{ - cpp2::AddGroupReq req; - req.set_group_name("group_1"); - std::vector zones = {"zone_2", "zone_1", "zone_0"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_EXISTED, resp.get_code()); -} -// Add Group with empty zone name list -{ - cpp2::AddGroupReq req; - req.set_group_name("group_0"); - std::vector zones = {}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); -} -// Add Group with duplicate zone name -{ - cpp2::AddGroupReq req; - req.set_group_name("group_0"); - std::vector zones = {"zone_0", "zone_0", "zone_2"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - 
ASSERT_EQ(nebula::cpp2::ErrorCode::E_CONFLICT, resp.get_code()); -} -// Add Group name already existed -{ - cpp2::AddGroupReq req; - req.set_group_name("group_0"); - std::vector zones = {"zone_0", "zone_1"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_EXISTED, resp.get_code()); -} -{ - cpp2::AddGroupReq req; - req.set_group_name("group_1"); - std::vector zones = {"zone_0", "zone_1"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Get Group -{ - cpp2::GetGroupReq req; - req.set_group_name("group_0"); - auto* processor = GetGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(3, resp.get_zone_names().size()); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - ASSERT_EQ(zones, resp.get_zone_names()); -} -// Get Group which is not exist -{ - cpp2::GetGroupReq req; - req.set_group_name("group_not_exist"); - auto* processor = GetGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND, resp.get_code()); -} -// List Groups -{ - cpp2::ListGroupsReq req; - auto* processor = ListGroupsProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(2, resp.get_groups().size()); - ASSERT_EQ("group_0", resp.get_groups()[0].get_group_name()); - 
ASSERT_EQ("group_1", resp.get_groups()[1].get_group_name()); -} -{ - std::vector nodes; - for (int32_t i = 9; i < 12; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_3"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Add zone into group -{ - cpp2::AddZoneIntoGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_3"); - auto* processor = AddZoneIntoGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Add zone into group which group not exist -{ - cpp2::AddZoneIntoGroupReq req; - req.set_group_name("group_not_exist"); - req.set_zone_name("zone_0"); - auto* processor = AddZoneIntoGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND, resp.get_code()); -} -// Add zone into group which zone already exist -{ - cpp2::AddZoneIntoGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_0"); - auto* processor = AddZoneIntoGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_EXISTED, resp.get_code()); -} -// Add zone into group which zone not exist -{ - cpp2::AddZoneIntoGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_not_exist"); - auto* processor = AddZoneIntoGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND, 
resp.get_code()); -} -// Drop zone from group -{ - cpp2::DropZoneFromGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_3"); - auto* processor = DropZoneFromGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Drop zone from group which group not exist -{ - cpp2::DropZoneFromGroupReq req; - req.set_group_name("group_not_exist"); - req.set_zone_name("zone_0"); - auto* processor = DropZoneFromGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND, resp.get_code()); -} -// Drop zone from group which zone not exist -{ - cpp2::DropZoneFromGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_not_exist"); - auto* processor = DropZoneFromGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_ZONE_NOT_FOUND, resp.get_code()); -} -// Drop Group -{ - cpp2::DropGroupReq req; - req.set_group_name("group_0"); - auto* processor = DropGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Drop Group which is not exist -{ - cpp2::DropGroupReq req; - req.set_group_name("group_0"); - auto* processor = DropGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND, resp.get_code()); -} -// Drop Zone belong to a group -{ - cpp2::DropZoneReq req; - req.set_zone_name("zone_0"); - auto* processor = DropZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - 
processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_NOT_DROP, resp.get_code()); -} -{ - cpp2::DropGroupReq req; - req.set_group_name("group_1"); - auto* processor = DropGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} // Drop Zone { cpp2::DropZoneReq req; diff --git a/src/meta/test/MetaClientTest.cpp b/src/meta/test/MetaClientTest.cpp index a9e6d6f4723..1a8c6a9318f 100644 --- a/src/meta/test/MetaClientTest.cpp +++ b/src/meta/test/MetaClientTest.cpp @@ -397,22 +397,6 @@ TEST(MetaClientTest, SpaceWithGroupTest) { ASSERT_TRUE(result.ok()); ASSERT_EQ(5, result.value().size()); } -// Add Group -{ - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - auto result = client->addGroup("group_0", std::move(zones)).get(); - ASSERT_TRUE(result.ok()); -} -{ - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3", "zone_4"}; - auto result = client->addGroup("group_1", std::move(zones)).get(); - ASSERT_TRUE(result.ok()); -} -{ - auto result = client->listGroups().get(); - ASSERT_TRUE(result.ok()); - ASSERT_EQ(2, result.value().size()); -} } // namespace meta // Create Space without Group { @@ -426,79 +410,6 @@ TEST(MetaClientTest, SpaceWithGroupTest) { ret = client->createSpace(spaceDesc, true).get(); ASSERT_TRUE(ret.ok()) << ret.status(); } -// Create Space on group_0, replica factor is equal with zone size -{ - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name("space_on_group_0_3"); - spaceDesc.set_partition_num(9); - spaceDesc.set_replica_factor(3); - spaceDesc.set_group_name("group_0"); - auto ret = client->createSpace(spaceDesc).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); - - ret = client->createSpace(spaceDesc, true).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); -} -// Drop Group should failed -{ - auto result = client->dropGroup("group_0").get(); - 
ASSERT_FALSE(result.ok()); -} -// Create Space on group_0, replica factor is less than zone size -{ - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name("space_on_group_0_1"); - spaceDesc.set_partition_num(9); - spaceDesc.set_replica_factor(1); - spaceDesc.set_group_name("group_0"); - auto ret = client->createSpace(spaceDesc).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); - - ret = client->createSpace(spaceDesc, true).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); -} -// Create Space on group_0, replica factor is larger than zone size -{ - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name("space_on_group_0_4"); - spaceDesc.set_partition_num(9); - spaceDesc.set_replica_factor(4); - spaceDesc.set_group_name("group_0"); - auto ret = client->createSpace(spaceDesc).get(); - ASSERT_FALSE(ret.ok()) << ret.status(); - - ret = client->createSpace(spaceDesc, true).get(); - ASSERT_FALSE(ret.ok()) << ret.status(); -} -{ - auto result = client->addZoneIntoGroup("zone_3", "group_0").get(); - ASSERT_TRUE(result.ok()); -} -{ - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name("space_on_group_0_4"); - spaceDesc.set_partition_num(9); - spaceDesc.set_replica_factor(4); - spaceDesc.set_group_name("group_0"); - auto ret = client->createSpace(spaceDesc).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); - - ret = client->createSpace(spaceDesc, true).get(); - ASSERT_TRUE(ret.ok()) << ret.status(); -} -// Create Space on a group which is not exist -{ - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name("space_on_group_not_exist"); - spaceDesc.set_partition_num(9); - spaceDesc.set_replica_factor(4); - spaceDesc.set_group_name("group_not_exist"); - auto ret = client->createSpace(spaceDesc).get(); - ASSERT_FALSE(ret.ok()) << ret.status(); - - ret = client->createSpace(spaceDesc, true).get(); - ASSERT_FALSE(ret.ok()) << ret.status(); -} } // namespace nebula TEST(MetaClientTest, TagTest) { @@ -1317,100 +1228,6 @@ TEST(MetaClientTest, GroupAndZoneTest) { 
auto result = client->dropHostFromZone(node, "zone_0").get(); ASSERT_FALSE(result.ok()); } - // Add Group - { - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - auto result = client->addGroup("group_0", std::move(zones)).get(); - ASSERT_TRUE(result.ok()); - } - // Add Group with empty zone name list - { - std::vector zones = {}; - auto result = client->addGroup("group_0", std::move(zones)).get(); - ASSERT_FALSE(result.ok()); - } - // Add Group with duplicate zone name - { - std::vector zones = {"zone_0", "zone_0", "zone_2"}; - auto result = client->addGroup("group_0", std::move(zones)).get(); - ASSERT_FALSE(result.ok()); - } - // Add Group already existed - { - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - auto result = client->addGroup("group_0", std::move(zones)).get(); - ASSERT_FALSE(result.ok()); - } - { - std::vector zones = {"zone_1", "zone_2"}; - auto result = client->addGroup("group_1", std::move(zones)).get(); - ASSERT_TRUE(result.ok()); - } - // Get Group - { - auto result = client->getGroup("group_0").get(); - ASSERT_TRUE(result.ok()); - } - // Get Group which is not exist - { - auto result = client->getGroup("group_not_exist").get(); - ASSERT_FALSE(result.ok()); - } - // List Groups - { - auto result = client->listGroups().get(); - ASSERT_TRUE(result.ok()); - } - { - std::vector nodes = {{"9", 9}, {"10", 10}, {"11", 11}}; - auto result = client->addZone("zone_3", nodes).get(); - ASSERT_TRUE(result.ok()); - } - // Add zone into group - { - auto result = client->addZoneIntoGroup("zone_3", "group_0").get(); - ASSERT_TRUE(result.ok()); - } - // Add zone into group which group not exist - { - auto result = client->addZoneIntoGroup("zone_0", "group_not_exist").get(); - ASSERT_FALSE(result.ok()); - } - // Add zone into group which zone already exist - { - auto result = client->addZoneIntoGroup("zone_0", "group_0").get(); - ASSERT_FALSE(result.ok()); - } - // Add zone into group which zone not exist - { - auto result = 
client->addZoneIntoGroup("zone_not_exist", "group_0").get(); - ASSERT_FALSE(result.ok()); - } - // Drop zone from group - { - auto result = client->dropZoneFromGroup("zone_3", "group_0").get(); - ASSERT_TRUE(result.ok()); - } - // Drop zone from group which group not exist - { - auto result = client->dropZoneFromGroup("zone_0", "group_not_exist").get(); - ASSERT_FALSE(result.ok()); - } - // Drop zone from group which zone not exist - { - auto result = client->dropZoneFromGroup("zone_not_exist", "group_0").get(); - ASSERT_FALSE(result.ok()); - } - // Drop Group - { - auto result = client->dropGroup("group_0").get(); - ASSERT_TRUE(result.ok()); - } - // Drop Group which is not exist - { - auto result = client->dropGroup("group_0").get(); - ASSERT_FALSE(result.ok()); - } // Drop Zone { auto result = client->dropZone("zone_0").get(); diff --git a/src/meta/test/ProcessorTest.cpp b/src/meta/test/ProcessorTest.cpp index 49e252d53d4..f42f5d4a5d9 100644 --- a/src/meta/test/ProcessorTest.cpp +++ b/src/meta/test/ProcessorTest.cpp @@ -30,12 +30,8 @@ #include "meta/processors/schema/ListEdgesProcessor.h" #include "meta/processors/schema/ListTagsProcessor.h" #include "meta/processors/session/SessionManagerProcessor.h" -#include "meta/processors/zone/AddGroupProcessor.h" #include "meta/processors/zone/AddZoneProcessor.h" -#include "meta/processors/zone/DropGroupProcessor.h" -#include "meta/processors/zone/ListGroupsProcessor.h" #include "meta/processors/zone/ListZonesProcessor.h" -#include "meta/processors/zone/UpdateGroupProcessor.h" #include "meta/processors/zone/UpdateZoneProcessor.h" #include "meta/test/TestUtils.h" @@ -467,298 +463,6 @@ TEST(ProcessorTest, SpaceTest) { } } -TEST(ProcessorTest, SpaceWithGroupTest) { - fs::TempDir rootPath("/tmp/SpaceWithGroupTest.XXXXXX"); - std::unique_ptr kv(MockCluster::initMetaKV(rootPath.path())); - std::vector addresses; - for (int32_t i = 0; i <= 10; i++) { - addresses.emplace_back(std::to_string(i), i); - } - 
TestUtils::createSomeHosts(kv.get(), std::move(addresses)); - - // Add Zones - {{std::vector nodes; - for (int32_t i = 0; i < 2; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_0"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - std::vector nodes; - for (int32_t i = 2; i < 4; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_1"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - std::vector nodes; - for (int32_t i = 4; i < 6; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_2"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - std::vector nodes; - for (int32_t i = 6; i < 8; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_3"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - std::vector nodes; - for (int32_t i = 8; i < 10; i++) { - nodes.emplace_back(std::to_string(i), i); - } - cpp2::AddZoneReq req; - req.set_zone_name("zone_4"); - req.set_nodes(std::move(nodes)); - auto* processor = 
AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -} // namespace meta -// List Zones -{ - cpp2::ListZonesReq req; - auto* processor = ListZonesProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(5, resp.get_zones().size()); - ASSERT_EQ("zone_0", resp.get_zones()[0].get_zone_name()); - ASSERT_EQ("zone_1", resp.get_zones()[1].get_zone_name()); - ASSERT_EQ("zone_2", resp.get_zones()[2].get_zone_name()); - ASSERT_EQ("zone_3", resp.get_zones()[3].get_zone_name()); - ASSERT_EQ("zone_4", resp.get_zones()[4].get_zone_name()); -} - -// Add Group -{ - cpp2::AddGroupReq req; - req.set_group_name("group_0"); - std::vector zones = {"zone_0", "zone_1", "zone_2"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - cpp2::AddGroupReq req; - req.set_group_name("group_1"); - std::vector zones = {"zone_0", "zone_1", "zone_2", "zone_3", "zone_4"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// List Groups -{ - cpp2::ListGroupsReq req; - auto* processor = ListGroupsProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - ASSERT_EQ(2, resp.get_groups().size()); - ASSERT_EQ("group_0", 
resp.get_groups()[0].get_group_name()); - ASSERT_EQ("group_1", resp.get_groups()[1].get_group_name()); -} - -// Create Space without Group -{ - cpp2::SpaceDesc properties; - properties.set_space_name("default_space"); - properties.set_partition_num(9); - properties.set_replica_factor(3); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Create Space on group_0, replica factor is equal with zone size -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_0_3"); - properties.set_partition_num(9); - properties.set_replica_factor(3); - properties.set_group_name("group_0"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Drop Group should failed -{ - cpp2::DropGroupReq req; - req.set_group_name("group_0"); - auto* processor = DropGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_NOT_DROP, resp.get_code()); -} -// Create Space on group_0, replica factor is less than zone size -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_0_1"); - properties.set_partition_num(9); - properties.set_replica_factor(1); - properties.set_group_name("group_0"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - 
ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Create Space on group_0, replica factor is larger than zone size -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_0_4"); - properties.set_partition_num(9); - properties.set_replica_factor(4); - properties.set_group_name("group_0"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); -} -{ - cpp2::AddZoneIntoGroupReq req; - req.set_group_name("group_0"); - req.set_zone_name("zone_3"); - auto* processor = AddZoneIntoGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_0_4"); - properties.set_partition_num(9); - properties.set_replica_factor(4); - properties.set_group_name("group_0"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); -} -// Create Space on a group which is not exist -{ - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_group_not_exist"); - properties.set_partition_num(9); - properties.set_replica_factor(4); - properties.set_group_name("group_not_exist"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - 
ASSERT_EQ(nebula::cpp2::ErrorCode::E_GROUP_NOT_FOUND, resp.get_code()); -} -// Create space on empty zone -{ - { - std::vector nodes = {HostAddr("10", 10)}; - cpp2::AddZoneReq req; - req.set_zone_name("zone_5"); - req.set_nodes(std::move(nodes)); - auto* processor = AddZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - } - { - cpp2::AddGroupReq req; - req.set_group_name("group_2"); - std::vector zones = {"zone_5"}; - req.set_zone_names(std::move(zones)); - auto* processor = AddGroupProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - } - { - cpp2::DropHostFromZoneReq req; - req.set_zone_name("zone_5"); - HostAddr node{"10", 10}; - req.set_node(std::move(node)); - auto* processor = DropHostFromZoneProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::SUCCEEDED, resp.get_code()); - } - { - cpp2::SpaceDesc properties; - properties.set_space_name("space_on_empty_hosts"); - properties.set_partition_num(1); - properties.set_replica_factor(1); - properties.set_group_name("group_2"); - cpp2::CreateSpaceReq req; - req.set_properties(std::move(properties)); - auto* processor = CreateSpaceProcessor::instance(kv.get()); - auto f = processor->getFuture(); - processor->process(req); - auto resp = std::move(f).get(); - ASSERT_EQ(nebula::cpp2::ErrorCode::E_INVALID_PARM, resp.get_code()); - } -} -} // namespace nebula - TEST(ProcessorTest, CreateTagTest) { fs::TempDir rootPath("/tmp/CreateTagTest.XXXXXX"); auto kv = MockCluster::initMetaKV(rootPath.path()); diff --git a/src/parser/MaintainSentences.cpp b/src/parser/MaintainSentences.cpp index 4b9d5d41028..1a90d068192 100644 --- 
a/src/parser/MaintainSentences.cpp +++ b/src/parser/MaintainSentences.cpp @@ -358,16 +358,6 @@ std::string ShowCreateEdgeIndexSentence::toString() const { return folly::stringPrintf("SHOW CREATE EDGE INDEX %s", indexName_.get()->c_str()); } -std::string AddGroupSentence::toString() const { - std::string buf; - buf.reserve(64); - buf += "ADD GROUP "; - buf += *groupName_; - buf += " "; - buf += zoneNames_->toString(); - return buf; -} - std::string AddZoneSentence::toString() const { std::string buf; buf.reserve(128); @@ -377,31 +367,16 @@ std::string AddZoneSentence::toString() const { return buf; } -std::string DropGroupSentence::toString() const { - return folly::stringPrintf("DROP GROUP %s", groupName_.get()->c_str()); -} - std::string DropZoneSentence::toString() const { return folly::stringPrintf("DROP ZONE %s", zoneName_.get()->c_str()); } -std::string DescribeGroupSentence::toString() const { - return folly::stringPrintf("DESCRIBE GROUP %s", groupName_.get()->c_str()); -} - std::string DescribeZoneSentence::toString() const { return folly::stringPrintf("DESCRIBE ZONE %s", zoneName_.get()->c_str()); } -std::string ListGroupsSentence::toString() const { return folly::stringPrintf("SHOW GROUPS"); } - std::string ListZonesSentence::toString() const { return folly::stringPrintf("SHOW ZONES"); } -std::string AddZoneIntoGroupSentence::toString() const { - return folly::stringPrintf( - "Add Zone %s Into Group %s", zoneName_.get()->c_str(), groupName_.get()->c_str()); -} - std::string AddHostIntoZoneSentence::toString() const { std::string buf; buf.reserve(64); @@ -412,11 +387,6 @@ std::string AddHostIntoZoneSentence::toString() const { return buf; } -std::string DropZoneFromGroupSentence::toString() const { - return folly::stringPrintf( - "Drop Zone %s From Group %s", zoneName_.get()->c_str(), groupName_.get()->c_str()); -} - std::string DropHostFromZoneSentence::toString() const { std::string buf; buf.reserve(64); diff --git a/src/parser/MaintainSentences.h 
b/src/parser/MaintainSentences.h index 6f7de95bb26..64a8363300c 100644 --- a/src/parser/MaintainSentences.h +++ b/src/parser/MaintainSentences.h @@ -784,25 +784,6 @@ class ZoneNameList final { std::vector> zones_; }; -class AddGroupSentence : public Sentence { - public: - explicit AddGroupSentence(std::string *groupName, ZoneNameList *zoneNames) { - groupName_.reset(groupName); - zoneNames_.reset(zoneNames); - kind_ = Kind::kAddGroup; - } - - std::string toString() const override; - - const std::string *groupName() const { return groupName_.get(); } - - const ZoneNameList *zoneNames() const { return zoneNames_.get(); } - - private: - std::unique_ptr groupName_; - std::unique_ptr zoneNames_; -}; - class AddZoneSentence : public Sentence { public: explicit AddZoneSentence(std::string *zoneName, HostList *hosts) { @@ -822,21 +803,6 @@ class AddZoneSentence : public Sentence { std::unique_ptr hosts_; }; -class DropGroupSentence : public Sentence { - public: - explicit DropGroupSentence(std::string *groupName) { - groupName_.reset(groupName); - kind_ = Kind::kDropGroup; - } - - std::string toString() const override; - - const std::string *groupName() const { return groupName_.get(); } - - private: - std::unique_ptr groupName_; -}; - class DropZoneSentence : public Sentence { public: explicit DropZoneSentence(std::string *zoneName) { @@ -852,21 +818,6 @@ class DropZoneSentence : public Sentence { std::unique_ptr zoneName_; }; -class DescribeGroupSentence : public Sentence { - public: - explicit DescribeGroupSentence(std::string *groupName) { - groupName_.reset(groupName); - kind_ = Kind::kDescribeGroup; - } - - std::string toString() const override; - - const std::string *groupName() const { return groupName_.get(); } - - private: - std::unique_ptr groupName_; -}; - class DescribeZoneSentence : public Sentence { public: explicit DescribeZoneSentence(std::string *zoneName) { @@ -882,13 +833,6 @@ class DescribeZoneSentence : public Sentence { std::unique_ptr zoneName_; }; 
-class ListGroupsSentence : public Sentence { - public: - ListGroupsSentence() { kind_ = Kind::kListGroups; } - - std::string toString() const override; -}; - class ListZonesSentence : public Sentence { public: ListZonesSentence() { kind_ = Kind::kListZones; } @@ -896,25 +840,6 @@ class ListZonesSentence : public Sentence { std::string toString() const override; }; -class AddZoneIntoGroupSentence : public Sentence { - public: - AddZoneIntoGroupSentence(std::string *zoneName, std::string *groupName) { - zoneName_.reset(zoneName); - groupName_.reset(groupName); - kind_ = Kind::kAddZoneIntoGroup; - } - - const std::string *zoneName() const { return zoneName_.get(); } - - const std::string *groupName() const { return groupName_.get(); } - - std::string toString() const override; - - private: - std::unique_ptr zoneName_; - std::unique_ptr groupName_; -}; - class AddHostIntoZoneSentence : public Sentence { public: explicit AddHostIntoZoneSentence(HostAddr *address, std::string *zoneName) { @@ -934,25 +859,6 @@ class AddHostIntoZoneSentence : public Sentence { std::unique_ptr address_; }; -class DropZoneFromGroupSentence : public Sentence { - public: - DropZoneFromGroupSentence(std::string *zoneName, std::string *groupName) { - zoneName_.reset(zoneName); - groupName_.reset(groupName); - kind_ = Kind::kDropZoneFromGroup; - } - - const std::string *zoneName() const { return zoneName_.get(); } - - const std::string *groupName() const { return groupName_.get(); } - - std::string toString() const override; - - private: - std::unique_ptr zoneName_; - std::unique_ptr groupName_; -}; - class DropHostFromZoneSentence : public Sentence { public: explicit DropHostFromZoneSentence(HostAddr *address, std::string *zoneName) { diff --git a/src/parser/Sentence.h b/src/parser/Sentence.h index f89801967ff..ade75177b17 100644 --- a/src/parser/Sentence.h +++ b/src/parser/Sentence.h @@ -111,12 +111,6 @@ class Sentence { kAdminJob, kAdminShowJobs, kGetSubgraph, - kAddGroup, - kDropGroup, - 
kDescribeGroup, - kListGroups, - kAddZoneIntoGroup, - kDropZoneFromGroup, kAddZone, kDropZone, kDescribeZone, diff --git a/src/parser/parser.yy b/src/parser/parser.yy index 0d2c2695029..a656f3aa0d3 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -120,7 +120,6 @@ static constexpr size_t kCommentLengthLimit = 256; nebula::GroupClause *group_clause; nebula::HostList *host_list; nebula::HostAddr *host_item; - nebula::ZoneNameList *zone_name_list; std::vector *integer_list; nebula::InBoundClause *in_bound_clause; nebula::OutBoundClause *out_bound_clause; @@ -333,7 +332,6 @@ static constexpr size_t kCommentLengthLimit = 256; %type column_spec %type column_spec_list %type column_name_list -%type zone_name_list %type role_type_clause %type acl_item_clause @@ -354,8 +352,6 @@ static constexpr size_t kCommentLengthLimit = 256; %type drop_tag_index_sentence drop_edge_index_sentence drop_fulltext_index_sentence %type describe_tag_index_sentence describe_edge_index_sentence %type rebuild_tag_index_sentence rebuild_edge_index_sentence rebuild_fulltext_index_sentence -%type add_group_sentence drop_group_sentence desc_group_sentence -%type add_zone_into_group_sentence drop_zone_from_group_sentence %type add_zone_sentence drop_zone_sentence desc_zone_sentence %type add_host_into_zone_sentence drop_host_from_zone_sentence %type create_snapshot_sentence drop_snapshot_sentence @@ -2632,38 +2628,6 @@ rebuild_fulltext_index_sentence meta::cpp2::AdminCmd::REBUILD_FULLTEXT_INDEX); } -add_group_sentence - : KW_ADD KW_GROUP name_label zone_name_list { - $$ = new AddGroupSentence($3, $4); - } - ; - -drop_group_sentence - : KW_DROP KW_GROUP name_label { - $$ = new DropGroupSentence($3); - } - ; - -desc_group_sentence - : KW_DESCRIBE KW_GROUP name_label { - $$ = new DescribeGroupSentence($3); - } - | KW_DESC KW_GROUP name_label { - $$ = new DescribeGroupSentence($3); - } - ; - -add_zone_into_group_sentence - : KW_ADD KW_ZONE name_label KW_INTO KW_GROUP name_label { - $$ = new 
AddZoneIntoGroupSentence($3, $6); - } - ; - -drop_zone_from_group_sentence - : KW_DROP KW_ZONE name_label KW_FROM KW_GROUP name_label { - $$ = new DropZoneFromGroupSentence($3, $6); - } - ; add_zone_sentence : KW_ADD KW_ZONE name_label host_list { @@ -3188,9 +3152,6 @@ show_sentence | KW_SHOW KW_COLLATION { $$ = new ShowCollationSentence(); } - | KW_SHOW KW_GROUPS { - $$ = new ListGroupsSentence(); - } | KW_SHOW KW_ZONES { $$ = new ListZonesSentence(); } @@ -3259,17 +3220,6 @@ show_config_item } ; -zone_name_list - : name_label { - $$ = new ZoneNameList(); - $$->addZone($1); - } - | zone_name_list COMMA name_label { - $$ = $1; - $$->addZone($3); - } - ; - create_space_sentence : KW_CREATE KW_SPACE opt_if_not_exists name_label { auto sentence = new CreateSpaceSentence($4, $3); @@ -3628,11 +3578,6 @@ maintain_sentence | rebuild_tag_index_sentence { $$ = $1; } | rebuild_edge_index_sentence { $$ = $1; } | rebuild_fulltext_index_sentence { $$ = $1; } - | add_group_sentence { $$ = $1; } - | drop_group_sentence { $$ = $1; } - | desc_group_sentence { $$ = $1; } - | add_zone_into_group_sentence { $$ = $1; } - | drop_zone_from_group_sentence { $$ = $1; } | add_zone_sentence { $$ = $1; } | drop_zone_sentence { $$ = $1; } | desc_zone_sentence { $$ = $1; } diff --git a/src/parser/test/ParserTest.cpp b/src/parser/test/ParserTest.cpp index 5267339a4c2..f1249614e67 100644 --- a/src/parser/test/ParserTest.cpp +++ b/src/parser/test/ParserTest.cpp @@ -2710,11 +2710,6 @@ TEST_F(ParserTest, MatchListSubscriptRange) { } TEST_F(ParserTest, Zone) { - { - std::string query = "SHOW GROUPS"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } { std::string query = "SHOW ZONES"; auto result = parse(query); @@ -2750,36 +2745,6 @@ TEST_F(ParserTest, Zone) { auto result = parse(query); ASSERT_TRUE(result.ok()) << result.status(); } - { - std::string query = "ADD GROUP group_0 zone_0,zone_1,zone_2"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << 
result.status(); - } - { - std::string query = "ADD ZONE zone_3 INTO GROUP group_0"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } - { - std::string query = "DROP ZONE zone_3 FROM GROUP group_0"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } - { - std::string query = "DESC GROUP group_0"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } - { - std::string query = "DESCRIBE GROUP group_0"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } - { - std::string query = "DROP GROUP group_0"; - auto result = parse(query); - ASSERT_TRUE(result.ok()) << result.status(); - } } TEST_F(ParserTest, FullText) { diff --git a/tests/maintain/test_zone.py b/tests/maintain/test_zone.py index bfa504d6bc6..9e74d876af9 100644 --- a/tests/maintain/test_zone.py +++ b/tests/maintain/test_zone.py @@ -36,39 +36,6 @@ def test_zone(self): resp = self.client.execute('SHOW ZONES') self.check_resp_succeeded(resp) - # Add Group - resp = self.client.execute('ADD GROUP group_0 zone_0') - self.check_resp_succeeded(resp) - - resp = self.client.execute('ADD GROUP default zone_0') - self.check_resp_failed(resp) - - # Get Group - resp = self.client.execute('DESC GROUP group_0') - self.check_resp_succeeded(resp) - - resp = self.client.execute('DESCRIBE GROUP group_0') - self.check_resp_succeeded(resp) - - # Get Group which is not exist - resp = self.client.execute('DESC GROUP group_not_exist') - self.check_resp_failed(resp) - - resp = self.client.execute('DESCRIBE GROUP group_not_exist') - self.check_resp_failed(resp) - - # SHOW Groups - resp = self.client.execute('SHOW GROUPS') - self.check_resp_succeeded(resp) - - # Drop Group - resp = self.client.execute('DROP GROUP group_0') - self.check_resp_succeeded(resp) - - # Drop Group which is not exist - resp = self.client.execute('DROP GROUP group_0') - self.check_resp_failed(resp) - # Drop Zone resp = self.client.execute('DROP 
ZONE zone_0') self.check_resp_succeeded(resp) From 65fdeedb76382ae3ee795d5f47f11ff4e2789d7c Mon Sep 17 00:00:00 2001 From: "kyle.cao" Date: Wed, 24 Nov 2021 21:05:41 +0800 Subject: [PATCH 45/53] disable yield var (#3271) * disable yield var fix tck * small fix * small fix --- src/graph/optimizer/rule/TopNRule.cpp | 1 + src/graph/util/ExpressionUtils.cpp | 10 +++++++ src/graph/util/ExpressionUtils.h | 2 ++ src/parser/parser.yy | 7 +++++ .../bugfix/VariableExpression.feature | 26 +++++++++++++++++++ .../features/go/GroupbyLimit.IntVid.feature | 2 +- tests/tck/features/go/GroupbyLimit.feature | 2 +- 7 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 tests/tck/features/bugfix/VariableExpression.feature diff --git a/src/graph/optimizer/rule/TopNRule.cpp b/src/graph/optimizer/rule/TopNRule.cpp index 194ec2788bd..17171fc4872 100644 --- a/src/graph/optimizer/rule/TopNRule.cpp +++ b/src/graph/optimizer/rule/TopNRule.cpp @@ -4,6 +4,7 @@ */ #include "graph/optimizer/rule/TopNRule.h" + #include "graph/optimizer/OptContext.h" #include "graph/optimizer/OptGroup.h" #include "graph/planner/plan/PlanNode.h" diff --git a/src/graph/util/ExpressionUtils.cpp b/src/graph/util/ExpressionUtils.cpp index aff0c9f14ce..ee3c7913ad0 100644 --- a/src/graph/util/ExpressionUtils.cpp +++ b/src/graph/util/ExpressionUtils.cpp @@ -63,6 +63,16 @@ std::vector ExpressionUtils::collectAll( return std::move(visitor).results(); } +bool ExpressionUtils::checkVarExprIfExist(const Expression *expr) { + auto vars = ExpressionUtils::collectAll(expr, {Expression::Kind::kVar}); + for (auto *var : vars) { + if (!static_cast(var)->isInner()) { + return true; + } + } + return false; +} + std::vector ExpressionUtils::findAllStorage(const Expression *expr) { return collectAll(expr, {Expression::Kind::kTagProperty, diff --git a/src/graph/util/ExpressionUtils.h b/src/graph/util/ExpressionUtils.h index 484f2db4127..c6d1fa5b87b 100644 --- a/src/graph/util/ExpressionUtils.h +++ 
b/src/graph/util/ExpressionUtils.h @@ -44,6 +44,8 @@ class ExpressionUtils { static std::vector collectAll( const Expression* self, const std::unordered_set& expected); + static bool checkVarExprIfExist(const Expression* expr); + static std::vector findAllStorage(const Expression* expr); static std::vector findAllInputVariableProp(const Expression* expr); diff --git a/src/parser/parser.yy b/src/parser/parser.yy index a656f3aa0d3..a30907dd26e 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -1444,9 +1444,16 @@ yield_column delete $3; } | expression { + if (graph::ExpressionUtils::checkVarExprIfExist($1)) { + throw nebula::GraphParser::syntax_error(@1, "Direct output of variable is prohibited"); + } $$ = new YieldColumn($1); } | expression KW_AS name_label { + if (graph::ExpressionUtils::checkVarExprIfExist($1)) { + delete $3; + throw nebula::GraphParser::syntax_error(@1, "Direct output of variable is prohibited"); + } $$ = new YieldColumn($1, *$3); delete $3; } diff --git a/tests/tck/features/bugfix/VariableExpression.feature b/tests/tck/features/bugfix/VariableExpression.feature new file mode 100644 index 00000000000..d95560d681f --- /dev/null +++ b/tests/tck/features/bugfix/VariableExpression.feature @@ -0,0 +1,26 @@ +# Copyright (c) 2021 vesoft inc. All rights reserved. +# +# This source code is licensed under Apache 2.0 License, +# attached with Common Clause Condition 1.0, found in the LICENSES directory. 
+Feature: Variable usage + + Background: + Given a graph with space named "nba" + + Scenario: disable yield $var + When executing query: + """ + $var = yield 1;$var2 = yield 3;yield $var1 + $var2 + """ + Then a SyntaxError should be raised at runtime: Direct output of variable is prohibited near `$var1 + $var2' + When executing query: + """ + $var=go from "Tim Duncan" over like yield like._dst as dst;yield $var + """ + Then a SyntaxError should be raised at runtime: Direct output of variable is prohibited near `$var' + Then drop the used space + When executing query: + """ + $var=go from "Tim Duncan" over like yield like._dst as dst;yield $var[0][0] + """ + Then a SyntaxError should be raised at runtime: Direct output of variable is prohibited near `$var[0][0]' diff --git a/tests/tck/features/go/GroupbyLimit.IntVid.feature b/tests/tck/features/go/GroupbyLimit.IntVid.feature index 86e457460f6..384c12928a8 100644 --- a/tests/tck/features/go/GroupbyLimit.IntVid.feature +++ b/tests/tck/features/go/GroupbyLimit.IntVid.feature @@ -15,7 +15,7 @@ Feature: Groupby & limit Sentence """ GO FROM hash("Marco Belinelli") OVER serve YIELD $$.team.name AS name | GROUP BY $-.start_year YIELD COUNT($var) """ - Then a SemanticError should be raised at runtime: + Then a SyntaxError should be raised at runtime: Scenario: Syntax test4 When executing query: diff --git a/tests/tck/features/go/GroupbyLimit.feature b/tests/tck/features/go/GroupbyLimit.feature index 2a282a2b7b0..7706c27c6b2 100644 --- a/tests/tck/features/go/GroupbyLimit.feature +++ b/tests/tck/features/go/GroupbyLimit.feature @@ -22,7 +22,7 @@ Feature: Groupby & limit Sentence """ GO FROM "Marco Belinelli" OVER serve YIELD $$.team.name AS name | GROUP BY $-.start_year YIELD COUNT($var) """ - Then a SemanticError should be raised at runtime: + Then a SyntaxError should be raised at runtime: Scenario: Syntax test4 When executing query: From 6a8a5f751ae1389433efba33f5380e160d648f54 Mon Sep 17 00:00:00 2001 From: Shylock Hg 
<33566796+Shylock-Hg@users.noreply.github.com> Date: Thu, 25 Nov 2021 09:59:46 +0800 Subject: [PATCH 46/53] Support filter in scan. (#3329) * Support filter in scan. * Fix compile error. * Clear expression context when after filter. Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> --- .../context/StorageExpressionContext.h | 2 +- src/storage/exec/ScanNode.h | 78 +++++++++++++++---- src/storage/query/GetNeighborsProcessor.cpp | 8 +- src/storage/query/QueryBaseProcessor-inl.h | 41 ++++++++-- src/storage/query/QueryBaseProcessor.h | 3 +- src/storage/query/ScanEdgeProcessor.cpp | 26 +++++-- src/storage/query/ScanEdgeProcessor.h | 7 +- src/storage/query/ScanVertexProcessor.cpp | 43 ++++++---- src/storage/query/ScanVertexProcessor.h | 7 +- src/storage/test/ScanEdgeTest.cpp | 43 ++++++++++ src/storage/test/ScanVertexTest.cpp | 65 ++++++++++++++++ src/storage/test/UpdateEdgeTest.cpp | 3 +- 12 files changed, 274 insertions(+), 52 deletions(-) diff --git a/src/storage/context/StorageExpressionContext.h b/src/storage/context/StorageExpressionContext.h index 65b989594a3..38ef72eba39 100644 --- a/src/storage/context/StorageExpressionContext.h +++ b/src/storage/context/StorageExpressionContext.h @@ -147,7 +147,7 @@ class StorageExpressionContext final : public ExpressionContext { size_t vIdLen_; bool isIntId_; - RowReader* reader_; + RowReader* reader_{nullptr}; std::string key_; // tag or edge name std::string name_; diff --git a/src/storage/exec/ScanNode.h b/src/storage/exec/ScanNode.h index 5af7912f758..22425dcb90b 100644 --- a/src/storage/exec/ScanNode.h +++ b/src/storage/exec/ScanNode.h @@ -13,6 +13,8 @@ namespace storage { using Cursor = std::string; +inline bool vTrue(const Value& v) { return v.isBool() && v.getBool(); } + // Node to scan vertices of one partition class ScanVertexPropNode : public QueryNode { public: @@ -23,13 +25,17 @@ class ScanVertexPropNode : public QueryNode { bool enableReadFollower, int64_t limit, std::unordered_map* cursors, - 
nebula::DataSet* resultDataSet) + nebula::DataSet* resultDataSet, + StorageExpressionContext* expCtx = nullptr, + Expression* filter = nullptr) : context_(context), tagNodes_(std::move(tagNodes)), enableReadFollower_(enableReadFollower), limit_(limit), cursors_(cursors), - resultDataSet_(resultDataSet) { + resultDataSet_(resultDataSet), + expCtx_(expCtx), + filter_(filter) { name_ = "ScanVertexPropNode"; for (std::size_t i = 0; i < tagNodes_.size(); ++i) { tagNodesIndex_.emplace(tagNodes_[i]->tagId(), i); @@ -110,20 +116,36 @@ class ScanVertexPropNode : public QueryNode { })) { for (auto& tagNode : tagNodes_) { ret = tagNode->collectTagPropsIfValid( - [&row](const std::vector* props) -> nebula::cpp2::ErrorCode { + [&row, tagNode = tagNode.get(), this]( + const std::vector* props) -> nebula::cpp2::ErrorCode { for (const auto& prop : *props) { if (prop.returned_) { row.emplace_back(Value()); } + if (prop.filtered_ && expCtx_ != nullptr) { + expCtx_->setTagProp(tagNode->getTagName(), prop.name_, Value()); + } } return nebula::cpp2::ErrorCode::SUCCEEDED; }, - [&row, vIdLen, isIntId]( + [&row, vIdLen, isIntId, tagNode = tagNode.get(), this]( folly::StringPiece key, RowReader* reader, const std::vector* props) -> nebula::cpp2::ErrorCode { - if (!QueryUtils::collectVertexProps(key, vIdLen, isIntId, reader, props, row).ok()) { - return nebula::cpp2::ErrorCode::E_TAG_PROP_NOT_FOUND; + for (const auto& prop : *props) { + if (prop.returned_ || (prop.filtered_ && expCtx_ != nullptr)) { + auto value = QueryUtils::readVertexProp(key, vIdLen, isIntId, reader, prop); + if (!value.ok()) { + return nebula::cpp2::ErrorCode::E_TAG_PROP_NOT_FOUND; + } + if (prop.filtered_ && expCtx_ != nullptr) { + expCtx_->setTagProp(tagNode->getTagName(), prop.name_, value.value()); + } + if (prop.returned_) { + VLOG(2) << "Collect prop " << prop.name_; + row.emplace_back(std::move(value).value()); + } + } } return nebula::cpp2::ErrorCode::SUCCEEDED; }); @@ -131,9 +153,11 @@ class ScanVertexPropNode 
: public QueryNode { break; } } - if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { + if (ret == nebula::cpp2::ErrorCode::SUCCEEDED && + (filter_ == nullptr || vTrue(filter_->eval(*expCtx_)))) { resultDataSet_->rows.emplace_back(std::move(row)); } + expCtx_->clear(); for (auto& tagNode : tagNodes_) { tagNode->clear(); } @@ -149,6 +173,8 @@ class ScanVertexPropNode : public QueryNode { // cursors for next scan std::unordered_map* cursors_; nebula::DataSet* resultDataSet_; + StorageExpressionContext* expCtx_{nullptr}; + Expression* filter_{nullptr}; }; // Node to scan edge of one partition @@ -161,13 +187,17 @@ class ScanEdgePropNode : public QueryNode { bool enableReadFollower, int64_t limit, std::unordered_map* cursors, - nebula::DataSet* resultDataSet) + nebula::DataSet* resultDataSet, + StorageExpressionContext* expCtx = nullptr, + Expression* filter = nullptr) : context_(context), edgeNodes_(std::move(edgeNodes)), enableReadFollower_(enableReadFollower), limit_(limit), cursors_(cursors), - resultDataSet_(resultDataSet) { + resultDataSet_(resultDataSet), + expCtx_(expCtx), + filter_(filter) { QueryNode::name_ = "ScanEdgePropNode"; for (std::size_t i = 0; i < edgeNodes_.size(); ++i) { edgeNodesIndex_.emplace(edgeNodes_[i]->edgeType(), i); @@ -230,20 +260,36 @@ class ScanEdgePropNode : public QueryNode { nebula::cpp2::ErrorCode ret = nebula::cpp2::ErrorCode::SUCCEEDED; for (auto& edgeNode : edgeNodes_) { ret = edgeNode->collectEdgePropsIfValid( - [&row](const std::vector* props) -> nebula::cpp2::ErrorCode { + [&row, edgeNode = edgeNode.get(), this]( + const std::vector* props) -> nebula::cpp2::ErrorCode { for (const auto& prop : *props) { if (prop.returned_) { row.emplace_back(Value()); } + if (prop.filtered_ && expCtx_ != nullptr) { + expCtx_->setEdgeProp(edgeNode->getEdgeName(), prop.name_, Value()); + } } return nebula::cpp2::ErrorCode::SUCCEEDED; }, - [&row, vIdLen, isIntId]( + [&row, vIdLen, isIntId, edgeNode = edgeNode.get(), this]( folly::StringPiece key, 
RowReader* reader, const std::vector* props) -> nebula::cpp2::ErrorCode { - if (!QueryUtils::collectEdgeProps(key, vIdLen, isIntId, reader, props, row).ok()) { - return nebula::cpp2::ErrorCode::E_EDGE_PROP_NOT_FOUND; + for (const auto& prop : *props) { + if (prop.returned_ || (prop.filtered_ && expCtx_ != nullptr)) { + auto value = QueryUtils::readEdgeProp(key, vIdLen, isIntId, reader, prop); + if (!value.ok()) { + return nebula::cpp2::ErrorCode::E_EDGE_PROP_NOT_FOUND; + } + if (prop.filtered_ && expCtx_ != nullptr) { + expCtx_->setEdgeProp(edgeNode->getEdgeName(), prop.name_, value.value()); + } + if (prop.returned_) { + VLOG(2) << "Collect prop " << prop.name_; + row.emplace_back(std::move(value).value()); + } + } } return nebula::cpp2::ErrorCode::SUCCEEDED; }); @@ -251,9 +297,11 @@ class ScanEdgePropNode : public QueryNode { break; } } - if (ret == nebula::cpp2::ErrorCode::SUCCEEDED) { + if (ret == nebula::cpp2::ErrorCode::SUCCEEDED && + (filter_ == nullptr || vTrue(filter_->eval(*expCtx_)))) { resultDataSet_->rows.emplace_back(std::move(row)); } + expCtx_->clear(); for (auto& edgeNode : edgeNodes_) { edgeNode->clear(); } @@ -268,6 +316,8 @@ class ScanEdgePropNode : public QueryNode { // cursors for next scan std::unordered_map* cursors_; nebula::DataSet* resultDataSet_; + StorageExpressionContext* expCtx_{nullptr}; + Expression* filter_{nullptr}; }; } // namespace storage diff --git a/src/storage/query/GetNeighborsProcessor.cpp b/src/storage/query/GetNeighborsProcessor.cpp index 149a7c9018b..5d08b2ab7e3 100644 --- a/src/storage/query/GetNeighborsProcessor.cpp +++ b/src/storage/query/GetNeighborsProcessor.cpp @@ -284,7 +284,13 @@ nebula::cpp2::ErrorCode GetNeighborsProcessor::checkAndBuildContexts( if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { return code; } - code = buildFilter(req); + code = buildFilter(req, [](const cpp2::GetNeighborsRequest& r) -> const std::string* { + if (r.get_traverse_spec().filter_ref().has_value()) { + return 
r.get_traverse_spec().get_filter(); + } else { + return nullptr; + } + }); if (code != nebula::cpp2::ErrorCode::SUCCEEDED) { return code; } diff --git a/src/storage/query/QueryBaseProcessor-inl.h b/src/storage/query/QueryBaseProcessor-inl.h index f472f265576..47b8f48c54f 100644 --- a/src/storage/query/QueryBaseProcessor-inl.h +++ b/src/storage/query/QueryBaseProcessor-inl.h @@ -129,18 +129,18 @@ nebula::cpp2::ErrorCode QueryBaseProcessor::buildYields(const REQ& re } template -nebula::cpp2::ErrorCode QueryBaseProcessor::buildFilter(const REQ& req) { - const auto& traverseSpec = req.get_traverse_spec(); - if (!traverseSpec.filter_ref().has_value()) { +nebula::cpp2::ErrorCode QueryBaseProcessor::buildFilter( + const REQ& req, std::function&& getFilter) { + const auto* filterStr = getFilter(req); + if (filterStr == nullptr) { return nebula::cpp2::ErrorCode::SUCCEEDED; } - const auto& filterStr = *traverseSpec.filter_ref(); auto pool = &this->planContext_->objPool_; // auto v = env_; - if (!filterStr.empty()) { + if (!filterStr->empty()) { // the filter expression **must** return a bool - filter_ = Expression::decode(pool, filterStr); + filter_ = Expression::decode(pool, *filterStr); if (filter_ == nullptr) { return nebula::cpp2::ErrorCode::E_INVALID_FILTER; } @@ -438,8 +438,9 @@ nebula::cpp2::ErrorCode QueryBaseProcessor::checkExp(const Expression } return nebula::cpp2::ErrorCode::SUCCEEDED; } + case Expression::Kind::kTagProperty: case Expression::Kind::kSrcProperty: { - auto* sourceExp = static_cast(exp); + auto* sourceExp = static_cast(exp); const auto& tagName = sourceExp->sym(); const auto& propName = sourceExp->prop(); auto tagRet = this->env_->schemaMan_->toTagID(spaceId_, tagName); @@ -458,6 +459,17 @@ nebula::cpp2::ErrorCode QueryBaseProcessor::checkExp(const Expression const auto& tagSchema = iter->second.back(); if (propName == kVid || propName == kTag) { + if (returned || filtered) { + addPropContextIfNotExists(tagContext_.propContexts_, + 
tagContext_.indexMap_, + tagContext_.tagNames_, + tagId, + tagName, + propName, + nullptr, + returned, + filtered); + } return nebula::cpp2::ErrorCode::SUCCEEDED; } @@ -505,6 +517,17 @@ nebula::cpp2::ErrorCode QueryBaseProcessor::checkExp(const Expression const auto& edgeSchema = iter->second.back(); if (propName == kSrc || propName == kType || propName == kRank || propName == kDst) { + if (returned || filtered) { + addPropContextIfNotExists(edgeContext_.propContexts_, + edgeContext_.indexMap_, + edgeContext_.edgeNames_, + edgeType, + edgeName, + propName, + nullptr, + returned, + filtered); + } return nebula::cpp2::ErrorCode::SUCCEEDED; } @@ -561,7 +584,6 @@ nebula::cpp2::ErrorCode QueryBaseProcessor::checkExp(const Expression case Expression::Kind::kSubscript: case Expression::Kind::kAttribute: case Expression::Kind::kLabelAttribute: - case Expression::Kind::kTagProperty: case Expression::Kind::kVertex: case Expression::Kind::kEdge: case Expression::Kind::kLabel: @@ -597,6 +619,9 @@ void QueryBaseProcessor::addPropContextIfNotExists( bool filtered, const std::pair* statInfo) { auto idxIter = indexMap.find(entryId); + if (idxIter == indexMap.end()) { // for edge type + idxIter = indexMap.find(-entryId); + } if (idxIter == indexMap.end()) { // if no property of tag/edge has been add to propContexts PropContext ctx(propName.c_str(), field, returned, filtered, statInfo); diff --git a/src/storage/query/QueryBaseProcessor.h b/src/storage/query/QueryBaseProcessor.h index 3f75b0c5335..00dd8e97247 100644 --- a/src/storage/query/QueryBaseProcessor.h +++ b/src/storage/query/QueryBaseProcessor.h @@ -154,7 +154,8 @@ class QueryBaseProcessor : public BaseProcessor { // build edgeContexts_ according to return props nebula::cpp2::ErrorCode handleEdgeProps(std::vector& edgeProps); - nebula::cpp2::ErrorCode buildFilter(const REQ& req); + nebula::cpp2::ErrorCode buildFilter( + const REQ& req, std::function&& getFilter); nebula::cpp2::ErrorCode buildYields(const REQ& req); // build 
ttl info map diff --git a/src/storage/query/ScanEdgeProcessor.cpp b/src/storage/query/ScanEdgeProcessor.cpp index 5da9b6425e6..94a6a03a1b2 100644 --- a/src/storage/query/ScanEdgeProcessor.cpp +++ b/src/storage/query/ScanEdgeProcessor.cpp @@ -64,6 +64,13 @@ nebula::cpp2::ErrorCode ScanEdgeProcessor::checkAndBuildContexts(const cpp2::Sca std::vector returnProps = {*req.return_columns_ref()}; ret = handleEdgeProps(returnProps); buildEdgeColName(returnProps); + ret = buildFilter(req, [](const cpp2::ScanEdgeRequest& r) -> const std::string* { + if (r.filter_ref().has_value()) { + return r.get_filter(); + } else { + return nullptr; + } + }); return ret; } @@ -85,7 +92,8 @@ void ScanEdgeProcessor::onProcessFinished() { StoragePlan ScanEdgeProcessor::buildPlan( RuntimeContext* context, nebula::DataSet* result, - std::unordered_map* cursors) { + std::unordered_map* cursors, + StorageExpressionContext* expCtx) { StoragePlan plan; std::vector> edges; for (const auto& ec : edgeContext_.propContexts_) { @@ -93,7 +101,7 @@ StoragePlan ScanEdgeProcessor::buildPlan( std::make_unique(context, &edgeContext_, ec.first, &ec.second)); } auto output = std::make_unique( - context, std::move(edges), enableReadFollower_, limit_, cursors, result); + context, std::move(edges), enableReadFollower_, limit_, cursors, result, expCtx, filter_); plan.addNode(std::move(output)); return plan; @@ -104,10 +112,11 @@ folly::Future> ScanEdgeProcessor nebula::DataSet* result, std::unordered_map* cursors, PartitionID partId, - Cursor cursor) { + Cursor cursor, + StorageExpressionContext* expCtx) { return folly::via(executor_, - [this, context, result, cursors, partId, input = std::move(cursor)]() { - auto plan = buildPlan(context, result, cursors); + [this, context, result, cursors, partId, input = std::move(cursor), expCtx]() { + auto plan = buildPlan(context, result, cursors, expCtx); auto ret = plan.go(partId, input); if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { @@ -119,8 +128,9 @@ folly::Future> 
ScanEdgeProcessor void ScanEdgeProcessor::runInSingleThread(const cpp2::ScanEdgeRequest& req) { contexts_.emplace_back(RuntimeContext(planContext_.get())); + expCtxs_.emplace_back(StorageExpressionContext(spaceVidLen_, isIntId_)); std::unordered_set failedParts; - auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_); + auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_, &expCtxs_.front()); for (const auto& partEntry : req.get_parts()) { auto partId = partEntry.first; auto cursor = partEntry.second; @@ -142,6 +152,7 @@ void ScanEdgeProcessor::runInMultipleThread(const cpp2::ScanEdgeRequest& req) { nebula::DataSet result = resultDataSet_; results_.emplace_back(std::move(result)); contexts_.emplace_back(RuntimeContext(planContext_.get())); + expCtxs_.emplace_back(StorageExpressionContext(spaceVidLen_, isIntId_)); } size_t i = 0; std::vector>> futures; @@ -150,7 +161,8 @@ void ScanEdgeProcessor::runInMultipleThread(const cpp2::ScanEdgeRequest& req) { &results_[i], &cursorsOfPart_[i], partId, - cursor.get_has_next() ? *cursor.get_next_cursor() : "")); + cursor.get_has_next() ? 
*cursor.get_next_cursor() : "", + &expCtxs_[i])); i++; } diff --git a/src/storage/query/ScanEdgeProcessor.h b/src/storage/query/ScanEdgeProcessor.h index f1931cd881b..40c5186975d 100644 --- a/src/storage/query/ScanEdgeProcessor.h +++ b/src/storage/query/ScanEdgeProcessor.h @@ -39,14 +39,16 @@ class ScanEdgeProcessor : public QueryBaseProcessor buildPlan(RuntimeContext* context, nebula::DataSet* result, - std::unordered_map* cursors); + std::unordered_map* cursors, + StorageExpressionContext* expCtx); folly::Future> runInExecutor( RuntimeContext* context, nebula::DataSet* result, std::unordered_map* cursors, PartitionID partId, - Cursor cursor); + Cursor cursor, + StorageExpressionContext* expCtx); void runInSingleThread(const cpp2::ScanEdgeRequest& req); @@ -55,6 +57,7 @@ class ScanEdgeProcessor : public QueryBaseProcessor contexts_; + std::vector expCtxs_; std::vector results_; std::vector> cursorsOfPart_; diff --git a/src/storage/query/ScanVertexProcessor.cpp b/src/storage/query/ScanVertexProcessor.cpp index 032ec103660..bb9b3a705ad 100644 --- a/src/storage/query/ScanVertexProcessor.cpp +++ b/src/storage/query/ScanVertexProcessor.cpp @@ -65,6 +65,13 @@ nebula::cpp2::ErrorCode ScanVertexProcessor::checkAndBuildContexts( std::vector returnProps = *req.return_columns_ref(); ret = handleVertexProps(returnProps); buildTagColName(returnProps); + ret = buildFilter(req, [](const cpp2::ScanVertexRequest& r) -> const std::string* { + if (r.filter_ref().has_value()) { + return r.get_filter(); + } else { + return nullptr; + } + }); return ret; } @@ -87,14 +94,15 @@ void ScanVertexProcessor::onProcessFinished() { StoragePlan ScanVertexProcessor::buildPlan( RuntimeContext* context, nebula::DataSet* result, - std::unordered_map* cursors) { + std::unordered_map* cursors, + StorageExpressionContext* expCtx) { StoragePlan plan; std::vector> tags; for (const auto& tc : tagContext_.propContexts_) { tags.emplace_back(std::make_unique(context, &tagContext_, tc.first, &tc.second)); } 
auto output = std::make_unique( - context, std::move(tags), enableReadFollower_, limit_, cursors, result); + context, std::move(tags), enableReadFollower_, limit_, cursors, result, expCtx, filter_); plan.addNode(std::move(output)); return plan; @@ -105,23 +113,26 @@ folly::Future> ScanVertexProcess nebula::DataSet* result, std::unordered_map* cursorsOfPart, PartitionID partId, - Cursor cursor) { - return folly::via(executor_, - [this, context, result, cursorsOfPart, partId, input = std::move(cursor)]() { - auto plan = buildPlan(context, result, cursorsOfPart); - - auto ret = plan.go(partId, input); - if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { - return std::make_pair(ret, partId); - } - return std::make_pair(nebula::cpp2::ErrorCode::SUCCEEDED, partId); - }); + Cursor cursor, + StorageExpressionContext* expCtx) { + return folly::via( + executor_, + [this, context, result, cursorsOfPart, partId, input = std::move(cursor), expCtx]() { + auto plan = buildPlan(context, result, cursorsOfPart, expCtx); + + auto ret = plan.go(partId, input); + if (ret != nebula::cpp2::ErrorCode::SUCCEEDED) { + return std::make_pair(ret, partId); + } + return std::make_pair(nebula::cpp2::ErrorCode::SUCCEEDED, partId); + }); } void ScanVertexProcessor::runInSingleThread(const cpp2::ScanVertexRequest& req) { contexts_.emplace_back(RuntimeContext(planContext_.get())); + expCtxs_.emplace_back(StorageExpressionContext(spaceVidLen_, isIntId_)); std::unordered_set failedParts; - auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_); + auto plan = buildPlan(&contexts_.front(), &resultDataSet_, &cursors_, &expCtxs_.front()); for (const auto& partEntry : req.get_parts()) { auto partId = partEntry.first; auto cursor = partEntry.second; @@ -143,6 +154,7 @@ void ScanVertexProcessor::runInMultipleThread(const cpp2::ScanVertexRequest& req nebula::DataSet result = resultDataSet_; results_.emplace_back(std::move(result)); contexts_.emplace_back(RuntimeContext(planContext_.get())); + 
expCtxs_.emplace_back(StorageExpressionContext(spaceVidLen_, isIntId_)); } size_t i = 0; std::vector>> futures; @@ -151,7 +163,8 @@ void ScanVertexProcessor::runInMultipleThread(const cpp2::ScanVertexRequest& req &results_[i], &cursorsOfPart_[i], partId, - cursor.get_has_next() ? *cursor.get_next_cursor() : "")); + cursor.get_has_next() ? *cursor.get_next_cursor() : "", + &expCtxs_[i])); i++; } diff --git a/src/storage/query/ScanVertexProcessor.h b/src/storage/query/ScanVertexProcessor.h index 987c77e2edd..39b34aedae9 100644 --- a/src/storage/query/ScanVertexProcessor.h +++ b/src/storage/query/ScanVertexProcessor.h @@ -40,14 +40,16 @@ class ScanVertexProcessor StoragePlan buildPlan(RuntimeContext* context, nebula::DataSet* result, - std::unordered_map* cursors); + std::unordered_map* cursors, + StorageExpressionContext* expCtx); folly::Future> runInExecutor( RuntimeContext* context, nebula::DataSet* result, std::unordered_map* cursors, PartitionID partId, - Cursor cursor); + Cursor cursor, + StorageExpressionContext* expCtx); void runInSingleThread(const cpp2::ScanVertexRequest& req); @@ -57,6 +59,7 @@ class ScanVertexProcessor private: std::vector contexts_; + std::vector expCtxs_; std::vector results_; std::vector> cursorsOfPart_; diff --git a/src/storage/test/ScanEdgeTest.cpp b/src/storage/test/ScanEdgeTest.cpp index 3ed3d41a9dc..381b0df6c33 100644 --- a/src/storage/test/ScanEdgeTest.cpp +++ b/src/storage/test/ScanEdgeTest.cpp @@ -289,6 +289,49 @@ TEST(ScanEdgeTest, LimitTest) { } } +TEST(ScanEdgeTest, FilterTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + EdgeType serve = 101; + ObjectPool pool; + + { + LOG(INFO) << "Scan one edge with some properties in one 
batch"; + constexpr std::size_t limit = 3; + auto edge = std::make_pair( + serve, + std::vector{kSrc, kType, kRank, kDst, "teamName", "startYear", "endYear"}); + auto req = buildRequest({1}, {""}, edge, limit); + Expression* filter = EdgePropertyExpression::make(&pool, "101", kSrc); + filter = RelationalExpression::makeEQ( + &pool, filter, ConstantExpression::make(&pool, "Damian Lillard")); + req.set_filter(filter->encode()); + auto* processor = ScanEdgeProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + + DataSet expected({"101._src", + "101._type", + "101._rank", + "101._dst", + "101.teamName", + "101.startYear", + "101.endYear"}); + expected.emplace_back( + List({"Damian Lillard", 101, 2012, "Trail Blazers", "Trail Blazers", 2012, 2020})); + EXPECT_EQ(*resp.edge_data_ref(), expected); + } +} + } // namespace storage } // namespace nebula diff --git a/src/storage/test/ScanVertexTest.cpp b/src/storage/test/ScanVertexTest.cpp index f582848ad92..ea972dd39e6 100644 --- a/src/storage/test/ScanVertexTest.cpp +++ b/src/storage/test/ScanVertexTest.cpp @@ -437,6 +437,71 @@ TEST(ScanVertexTest, MultipleTagsTest) { } } +TEST(ScanVertexTest, FilterTest) { + fs::TempDir rootPath("/tmp/ScanVertexTest.XXXXXX"); + mock::MockCluster cluster; + cluster.initStorageKV(rootPath.path()); + auto* env = cluster.storageEnv_.get(); + auto totalParts = cluster.getTotalParts(); + ASSERT_EQ(true, QueryTestUtils::mockVertexData(env, totalParts)); + ASSERT_EQ(true, QueryTestUtils::mockEdgeData(env, totalParts)); + + TagID player = 1; + TagID team = 2; + ObjectPool pool; + + { + LOG(INFO) << "Scan one tag with some properties in one batch"; + // size_t totalRowCount = 0; + auto playerTag = + std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); + auto teamTag = std::make_pair(team, std::vector{kTag, "name"}); + auto req = buildRequest({1}, 
{""}, {playerTag, teamTag}); + Expression* filter = TagPropertyExpression::make(&pool, "1", "name"); + filter = + RelationalExpression::makeEQ(&pool, filter, ConstantExpression::make(&pool, "Kobe Bryant")); + req.set_filter(filter->encode()); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + nebula::DataSet expect( + {"_vid", "1._vid", "1._tag", "1.name", "1.age", "1.avgScore", "2._tag", "2.name"}); + expect.emplace_back(List( + {"Kobe Bryant", "Kobe Bryant", 1, "Kobe Bryant", 41, 25, Value::kEmpty, Value::kEmpty})); + EXPECT_EQ(expect, *resp.vertex_data_ref()); + } + { + LOG(INFO) << "Scan one tag with some properties in one batch"; + // size_t totalRowCount = 0; + auto playerTag = + std::make_pair(player, std::vector{kVid, kTag, "name", "age", "avgScore"}); + auto teamTag = std::make_pair(team, std::vector{kTag, "name"}); + auto req = buildRequest({1}, {""}, {playerTag, teamTag}); + Expression* filter = TagPropertyExpression::make(&pool, "1", "name"); + filter = + RelationalExpression::makeEQ(&pool, filter, ConstantExpression::make(&pool, "Kobe Bryant")); + filter = LogicalExpression::makeAnd( + &pool, + filter, + UnaryExpression::makeIsEmpty(&pool, TagPropertyExpression::make(&pool, "2", "name"))); + req.set_filter(filter->encode()); + auto* processor = ScanVertexProcessor::instance(env, nullptr); + auto f = processor->getFuture(); + processor->process(req); + auto resp = std::move(f).get(); + + ASSERT_EQ(0, resp.result.failed_parts.size()); + nebula::DataSet expect( + {"_vid", "1._vid", "1._tag", "1.name", "1.age", "1.avgScore", "2._tag", "2.name"}); + expect.emplace_back(List( + {"Kobe Bryant", "Kobe Bryant", 1, "Kobe Bryant", 41, 25, Value::kEmpty, Value::kEmpty})); + EXPECT_EQ(expect, *resp.vertex_data_ref()); + } +} + } // namespace storage } // namespace nebula diff --git 
a/src/storage/test/UpdateEdgeTest.cpp b/src/storage/test/UpdateEdgeTest.cpp index 86d66825c69..0dc6b115229 100644 --- a/src/storage/test/UpdateEdgeTest.cpp +++ b/src/storage/test/UpdateEdgeTest.cpp @@ -291,7 +291,8 @@ TEST(UpdateEdgeTest, No_Filter_Test) { auto resp = std::move(f).get(); LOG(INFO) << "Check the results..."; - EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()); + EXPECT_EQ(0, (*resp.result_ref()).failed_parts.size()) + << apache::thrift::util::enumNameSafe((*resp.result_ref()).failed_parts.front().get_code()); EXPECT_EQ(9, (*resp.props_ref()).colNames.size()); EXPECT_EQ("_inserted", (*resp.props_ref()).colNames[0]); EXPECT_EQ("101.playerName", (*resp.props_ref()).colNames[1]); From c6d1046c7ef8be4e98ef665ec1450ce24251fcc2 Mon Sep 17 00:00:00 2001 From: Doodle <13706157+critical27@users.noreply.github.com> Date: Wed, 24 Nov 2021 20:50:27 -0600 Subject: [PATCH 47/53] [Raft] Add prevote to make it better (#3322) * prevote * remove term in AskForVoteResp * address @kikimo's comments * rename proposedTerm_ to votedTerm_ * fix leader can't work when term changed during sending logs Co-authored-by: yaphet <4414314+darionyaphet@users.noreply.github.com> --- src/interface/raftex.thrift | 1 + src/kvstore/Listener.cpp | 5 +- src/kvstore/raftex/RaftPart.cpp | 274 ++++++++++++++++---------------- src/kvstore/raftex/RaftPart.h | 42 ++--- 4 files changed, 156 insertions(+), 166 deletions(-) diff --git a/src/interface/raftex.thrift b/src/interface/raftex.thrift index 8f1a8ea4f47..4c7dde896a9 100644 --- a/src/interface/raftex.thrift +++ b/src/interface/raftex.thrift @@ -50,6 +50,7 @@ struct AskForVoteRequest { 5: TermID term; // Proposed term 6: LogID last_log_id; // The last received log id 7: TermID last_log_term; // The term receiving the last log + 8: bool is_pre_vote; // Is pre vote or not } diff --git a/src/kvstore/Listener.cpp b/src/kvstore/Listener.cpp index f99ad2476e7..65d2e632cbd 100644 --- a/src/kvstore/Listener.cpp +++ b/src/kvstore/Listener.cpp @@ 
-49,7 +49,7 @@ void Listener::start(std::vector&& peers, bool) { lastLogId_ = wal_->lastLogId(); lastLogTerm_ = wal_->lastLogTerm(); - term_ = proposedTerm_ = lastLogTerm_; + term_ = lastLogTerm_; // Set the quorum number quorum_ = (peers.size() + 1) / 2; @@ -273,10 +273,9 @@ void Listener::resetListener() { reset(); VLOG(1) << folly::sformat( "The listener has been reset : leaderCommitId={}," - "proposedTerm={}, lastLogTerm={}, term={}," + "lastLogTerm={}, term={}," "lastApplyLogId={}", leaderCommitId_, - proposedTerm_, lastLogTerm_, term_, lastApplyLogId_); diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index 17b666488cb..de2aa7692c0 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -211,8 +211,7 @@ RaftPart::RaftPart( executor_(executor), snapshot_(snapshotMan), clientMan_(clientMan), - diskMan_(diskMan), - weight_(1) { + diskMan_(diskMan) { FileBasedWalPolicy policy; policy.fileSize = FLAGS_wal_file_size; policy.bufferSize = FLAGS_wal_buffer_size; @@ -262,7 +261,7 @@ void RaftPart::start(std::vector&& peers, bool asLearner) { lastLogId_ = wal_->lastLogId(); lastLogTerm_ = wal_->lastLogTerm(); - term_ = proposedTerm_ = lastLogTerm_; + term_ = lastLogTerm_; // Set the quorum number quorum_ = (peers.size() + 1) / 2; @@ -384,7 +383,8 @@ void RaftPart::preProcessTransLeader(const HostAddr& target) { self->role_ = Role::CANDIDATE; self->leader_ = HostAddr("", 0); } - self->leaderElection().get(); + // skip prevote for transfer leader + self->leaderElection(false).get(); }); } break; @@ -719,6 +719,7 @@ void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) { if (!wal_->appendLogs(iter)) { LOG_EVERY_N(WARNING, 100) << idStr_ << "Failed to write into WAL"; res = AppendLogResult::E_WAL_FAILURE; + wal_->rollbackToLog(lastLogId_); break; } lastId = wal_->lastLogId(); @@ -754,6 +755,7 @@ void RaftPart::replicateLogs(folly::EventBase* eb, std::lock_guard g(raftLock_); res = 
canAppendLogs(currTerm); if (res != AppendLogResult::SUCCEEDED) { + wal_->rollbackToLog(lastLogId_); break; } hosts = hosts_; @@ -943,6 +945,7 @@ void RaftPart::processAppendLogResponses(const AppendLogResponses& resps, // Not enough hosts accepted the log, re-try LOG_EVERY_N(WARNING, 100) << idStr_ << "Only " << numSucceeded << " hosts succeeded, Need to try again"; + usleep(1000); replicateLogs(eb, std::move(iter), currTerm, lastLogId, committedId, prevLogTerm, prevLogId); } } @@ -955,7 +958,7 @@ bool RaftPart::needToSendHeartbeat() { bool RaftPart::needToStartElection() { std::lock_guard g(raftLock_); if (status_ == Status::RUNNING && role_ == Role::FOLLOWER && - (lastMsgRecvDur_.elapsedInMSec() >= weight_ * FLAGS_raft_heartbeat_interval_secs * 1000 || + (lastMsgRecvDur_.elapsedInMSec() >= FLAGS_raft_heartbeat_interval_secs * 1000 || isBlindFollower_)) { LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur " << lastMsgRecvDur_.elapsedInMSec() << ", term " << term_; @@ -967,7 +970,8 @@ bool RaftPart::needToStartElection() { } bool RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req, - std::vector>& hosts) { + std::vector>& hosts, + bool isPreVote) { std::lock_guard g(raftLock_); // Make sure the partition is running @@ -982,14 +986,17 @@ bool RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req, return false; } - // Before start a new election, reset the votedAddr - votedAddr_ = HostAddr("", 0); - req.set_space(spaceId_); req.set_part(partId_); req.set_candidate_addr(addr_.host); req.set_candidate_port(addr_.port); - req.set_term(++proposedTerm_); // Bump up the proposed term + // Use term_ + 1 to check if peers would vote for me in prevote. + // Only increase the term when prevote succeeeded. 
+ if (isPreVote) { + req.set_term(term_ + 1); + } else { + req.set_term(++term_); + } req.set_last_log_id(lastLogId_); req.set_last_log_term(lastLogTerm_); @@ -998,62 +1005,73 @@ bool RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req, return true; } -typename RaftPart::Role RaftPart::processElectionResponses( - const RaftPart::ElectionResponses& results, - std::vector> hosts, - TermID proposedTerm) { +bool RaftPart::processElectionResponses(const RaftPart::ElectionResponses& results, + std::vector> hosts, + TermID proposedTerm, + bool isPreVote) { std::lock_guard g(raftLock_); if (UNLIKELY(status_ == Status::STOPPED)) { LOG(INFO) << idStr_ << "The part has been stopped, skip the request"; - return role_; + return false; } if (UNLIKELY(status_ == Status::STARTING)) { LOG(INFO) << idStr_ << "The partition is still starting"; - return role_; + return false; } if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) { LOG(INFO) << idStr_ << "The partition is still waiting snapshot"; - return role_; + return false; } if (role_ != Role::CANDIDATE) { LOG(INFO) << idStr_ << "Partition's role has changed to " << roleStr(role_) << " during the election, so discard the results"; - return role_; + return false; + } + + // term changed during actual leader election + if (!isPreVote && proposedTerm != term_) { + LOG(INFO) << idStr_ << "Partition's term has changed during election, " + << "so just ignore the respsonses, " + << "expected " << proposedTerm << ", actual " << term_; + return false; } size_t numSucceeded = 0; for (auto& r : results) { if (r.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) { ++numSucceeded; - } else if (r.second.get_error_code() == cpp2::ErrorCode::E_LOG_STALE) { - LOG(INFO) << idStr_ << "My last log id is less than " << hosts[r.first]->address() - << ", double my election interval."; - uint64_t curWeight = weight_.load(); - weight_.store(curWeight * 2); } else { - LOG(ERROR) << idStr_ << "Receive response about askForVote from " << 
hosts[r.first]->address() - << ", error code is " - << apache::thrift::util::enumNameSafe(r.second.get_error_code()); + LOG(WARNING) << idStr_ << "Receive response about askForVote from " + << hosts[r.first]->address() << ", error code is " + << apache::thrift::util::enumNameSafe(r.second.get_error_code()) + << ", isPreVote = " << isPreVote; } } CHECK(role_ == Role::CANDIDATE); if (numSucceeded >= quorum_) { - LOG(INFO) << idStr_ << "Partition is elected as the new leader for term " << proposedTerm; - term_ = proposedTerm; - role_ = Role::LEADER; - isBlindFollower_ = false; + if (isPreVote) { + LOG(INFO) << idStr_ << "Partition win prevote of term " << proposedTerm; + } else { + LOG(INFO) << idStr_ << "Partition is elected as the new leader for term " << proposedTerm; + term_ = proposedTerm; + role_ = Role::LEADER; + isBlindFollower_ = false; + } + return true; } - return role_; + LOG(INFO) << idStr_ << "Did not get enough votes from election of term " << proposedTerm + << ", isPreVote = " << isPreVote; + return false; } -folly::Future RaftPart::leaderElection() { +folly::Future RaftPart::leaderElection(bool isPreVote) { VLOG(2) << idStr_ << "Start leader election..."; using namespace folly; // NOLINT since the fancy overload of | operator @@ -1065,7 +1083,7 @@ folly::Future RaftPart::leaderElection() { cpp2::AskForVoteRequest voteReq; decltype(hosts_) hosts; - if (!prepareElectionRequest(voteReq, hosts)) { + if (!prepareElectionRequest(voteReq, hosts, isPreVote)) { // Suppose we have three replicas A(leader), B, C, after A crashed, // B, C will begin the election. B win, and send hb, C has gap with B // and need the snapshot from B. 
Meanwhile C begin the election, @@ -1086,12 +1104,13 @@ folly::Future RaftPart::leaderElection() { << ", term = " << voteReq.get_term() << ", lastLogId = " << voteReq.get_last_log_id() << ", lastLogTerm = " << voteReq.get_last_log_term() << ", candidateIP = " << voteReq.get_candidate_addr() - << ", candidatePort = " << voteReq.get_candidate_port() << ")"; + << ", candidatePort = " << voteReq.get_candidate_port() << ")" + << ", isPreVote = " << isPreVote; auto proposedTerm = voteReq.get_term(); auto resps = ElectionResponses(); if (hosts.empty()) { - auto ret = handleElectionResponses(resps, hosts, proposedTerm); + auto ret = handleElectionResponses(resps, hosts, proposedTerm, isPreVote); inElection_ = false; return ret; } else { @@ -1114,13 +1133,14 @@ folly::Future RaftPart::leaderElection() { return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED && !hosts[idx]->isLearner(); }) .via(executor_.get()) - .then([self = shared_from_this(), pro = std::move(promise), hosts, proposedTerm]( + .then([self = shared_from_this(), pro = std::move(promise), hosts, proposedTerm, isPreVote]( auto&& t) mutable { VLOG(2) << self->idStr_ << "AskForVoteRequest has been sent to all peers, waiting for responses"; CHECK(!t.hasException()); - pro.setValue(self->handleElectionResponses(t.value(), std::move(hosts), proposedTerm)); self->inElection_ = false; + pro.setValue( + self->handleElectionResponses(t.value(), std::move(hosts), proposedTerm, isPreVote)); }); return future; } @@ -1128,50 +1148,30 @@ folly::Future RaftPart::leaderElection() { bool RaftPart::handleElectionResponses(const ElectionResponses& resps, const std::vector>& peers, - TermID proposedTerm) { + TermID proposedTerm, + bool isPreVote) { // Process the responses - switch (processElectionResponses(resps, std::move(peers), proposedTerm)) { - case Role::LEADER: { - // Elected - LOG(INFO) << idStr_ << "The partition is elected as the leader"; - std::vector> hosts; - { - std::lock_guard g(raftLock_); - if (status_ == 
Status::RUNNING) { - leader_ = addr_; - hosts = hosts_; - bgWorkers_->addTask( - [self = shared_from_this(), proposedTerm] { self->onElected(proposedTerm); }); - lastMsgAcceptedTime_ = 0; - } - weight_ = 1; - commitInThisTerm_ = false; - } - // reset host can't be executed with raftLock_, otherwise it may encounter deadlock - for (auto& host : hosts) { - host->reset(); + auto elected = processElectionResponses(resps, std::move(peers), proposedTerm, isPreVote); + if (!isPreVote && elected) { + std::vector> hosts; + { + std::lock_guard g(raftLock_); + if (status_ == Status::RUNNING) { + leader_ = addr_; + hosts = hosts_; + bgWorkers_->addTask( + [self = shared_from_this(), proposedTerm] { self->onElected(proposedTerm); }); + lastMsgAcceptedTime_ = 0; } - sendHeartbeat(); - return true; - } - case Role::FOLLOWER: { - // Someone was elected - LOG(INFO) << idStr_ << "Someone else was elected"; - return true; + commitInThisTerm_ = false; } - case Role::CANDIDATE: { - // No one has been elected - LOG(INFO) << idStr_ << "No one is elected, continue the election"; - return false; - } - case Role::LEARNER: { - LOG(FATAL) << idStr_ << " Impossible! 
There must be some bugs!"; - return false; + // reset host can't be executed with raftLock_, otherwise it may encounter deadlock + for (auto& host : hosts) { + host->reset(); } + sendHeartbeat(); } - - LOG(FATAL) << "Should not reach here"; - return false; + return elected; } void RaftPart::statusPolling(int64_t startTime) { @@ -1184,15 +1184,15 @@ void RaftPart::statusPolling(int64_t startTime) { return; } } - size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3; + size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3 + +folly::Random::rand32(500); if (needToStartElection()) { - if (leaderElection().get()) { - VLOG(2) << idStr_ << "Stop the election"; + if (leaderElection(true).get() && leaderElection(false).get()) { + // elected as leader } else { // No leader has been elected, need to continue // (After sleeping a random period between [500ms, 2s]) VLOG(2) << idStr_ << "Wait for a while and continue the leader election"; - delay = (folly::Random::rand32(1500) + 500) * weight_; + delay = (folly::Random::rand32(1500) + 500); } } else if (needToSendHeartbeat()) { VLOG(2) << idStr_ << "Need to send heartbeat"; @@ -1243,7 +1243,8 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, << ": space = " << req.get_space() << ", partition = " << req.get_part() << ", candidateAddr = " << req.get_candidate_addr() << ":" << req.get_candidate_port() << ", term = " << req.get_term() << ", lastLogId = " << req.get_last_log_id() - << ", lastLogTerm = " << req.get_last_log_term(); + << ", lastLogTerm = " << req.get_last_log_term() + << ", isPreVote = " << req.get_is_pre_vote(); std::lock_guard g(raftLock_); @@ -1275,19 +1276,27 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, } auto candidate = HostAddr(req.get_candidate_addr(), req.get_candidate_port()); + auto code = checkPeer(candidate); + if (code != cpp2::ErrorCode::SUCCEEDED) { + resp.set_error_code(code); + return; + } + // Check term id - auto term = 
term_; - if (req.get_term() <= term) { - LOG(INFO) << idStr_ - << (role_ == Role::CANDIDATE ? "The partition is currently proposing term " - : "The partition currently is on term ") - << term - << ". The term proposed by the candidate is" - " no greater, so it will be rejected"; + if (req.get_term() < term_) { + LOG(INFO) << idStr_ << "The partition currently is on term " << term_ + << ", the term proposed by the candidate is " << req.get_term() + << ", so it will be rejected"; resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE); return; } + auto oldTerm = term_; + // req.get_term() >= term_, we won't update term in prevote + if (!req.get_is_pre_vote()) { + term_ = req.get_term(); + } + // Check the last term to receive a log if (req.get_last_log_term() < lastLogTerm_) { LOG(INFO) << idStr_ << "The partition's last term to receive a log is " << lastLogTerm_ @@ -1308,27 +1317,31 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, } } - // If we have voted for somebody, we will reject other candidates under the - // proposedTerm. - if (votedAddr_ != HostAddr("", 0)) { - if (proposedTerm_ > req.get_term() || - (proposedTerm_ == req.get_term() && votedAddr_ != candidate)) { - LOG(INFO) << idStr_ << "We have voted " << votedAddr_ << " on term " << proposedTerm_ - << ", so we should reject the candidate " << candidate << " request on term " - << req.get_term(); - resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE); - return; - } + /* + check if we have voted some one in the candidate's proposed term + 1. if this is a prevote: + * not enough votes: the candidate will trigger another round election + * majority votes: the candidate will start formal election (I'll reject the formal one as well) + 2. 
if this is a formal election: + * not enough votes: the candidate will trigger another round election + * majority votes: the candidate will be leader + */ + if (votedTerm_ == req.get_term() && votedAddr_ != candidate) { + LOG(INFO) << idStr_ << "We have voted " << votedAddr_ << " on term " << votedTerm_ + << ", so we should reject the candidate " << candidate << " request on term " + << req.get_term(); + resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE); + return; } - - auto code = checkPeer(candidate); - if (code != cpp2::ErrorCode::SUCCEEDED) { - resp.set_error_code(code); + if (req.get_is_pre_vote()) { + // return succeed if it is prevote, do not change any state + resp.set_error_code(cpp2::ErrorCode::SUCCEEDED); return; } + // Ok, no reason to refuse, we will vote for the candidate - LOG(INFO) << idStr_ << "The partition will vote for the candidate " << candidate; - resp.set_error_code(cpp2::ErrorCode::SUCCEEDED); + LOG(INFO) << idStr_ << "The partition will vote for the candidate " << candidate + << ", isPreVote = " << req.get_is_pre_vote(); // Before change role from leader to follower, check the logs locally. 
if (role_ == Role::LEADER && wal_->lastLogId() > lastLogId_) { @@ -1337,16 +1350,15 @@ void RaftPart::processAskForVoteRequest(const cpp2::AskForVoteRequest& req, wal_->rollbackToLog(lastLogId_); } if (role_ == Role::LEADER) { - bgWorkers_->addTask([self = shared_from_this(), term] { self->onLostLeadership(term); }); + bgWorkers_->addTask([self = shared_from_this(), oldTerm] { self->onLostLeadership(oldTerm); }); } role_ = Role::FOLLOWER; votedAddr_ = candidate; - proposedTerm_ = req.get_term(); + votedTerm_ = req.get_term(); leader_ = HostAddr("", 0); // Reset the last message time lastMsgRecvDur_.reset(); - weight_ = 1; isBlindFollower_ = false; return; } @@ -1561,8 +1573,8 @@ void RaftPart::processAppendLogRequest(const cpp2::AppendLogRequest& req, template cpp2::ErrorCode RaftPart::verifyLeader(const REQ& req) { DCHECK(!raftLock_.try_lock()); - auto candidate = HostAddr(req.get_leader_addr(), req.get_leader_port()); - auto code = checkPeer(candidate); + auto peer = HostAddr(req.get_leader_addr(), req.get_leader_port()); + auto code = checkPeer(peer); if (code != cpp2::ErrorCode::SUCCEEDED) { return code; } @@ -1574,29 +1586,22 @@ cpp2::ErrorCode RaftPart::verifyLeader(const REQ& req) { << ". The local term is " << term_ << ". The remote term is not newer"; return cpp2::ErrorCode::E_TERM_OUT_OF_DATE; } else if (req.get_current_term() > term_) { - // Leader stickiness, no matter the term in Request is larger or not. - // TODO(heng) Maybe we should reconsider the logic - if (leader_ != HostAddr("", 0) && leader_ != candidate && - lastMsgRecvDur_.elapsedInMSec() < FLAGS_raft_heartbeat_interval_secs * 1000) { - LOG_EVERY_N(INFO, 100) << idStr_ << "I believe the leader " << leader_ << " exists. 
" - << "Refuse to append logs of " << candidate; - return cpp2::ErrorCode::E_WRONG_LEADER; - } + // found new leader with higher term } else { // req.get_current_term() == term_ do { - if (role_ != Role::LEADER && leader_ == HostAddr("", 0)) { - LOG_EVERY_N(INFO, 100) << idStr_ << "I dont know who is leader for current term " << term_ - << ", so accept the candidate " << candidate; + if (UNLIKELY(role_ == Role::LEADER)) { + LOG(ERROR) << idStr_ << "Split brain happens, will follow the new leader " << peer + << " on term " << req.get_current_term(); break; - } - // Same leader - if (role_ != Role::LEADER && candidate == leader_) { - return cpp2::ErrorCode::SUCCEEDED; } else { - LOG_EVERY_N(INFO, 100) << idStr_ << "The local term is same as remote term " << term_ - << ", my role is " << roleStr(role_) << ", reject it!"; - return cpp2::ErrorCode::E_TERM_OUT_OF_DATE; + if (LIKELY(leader_ == peer)) { + // Same leader + return cpp2::ErrorCode::SUCCEEDED; + } else if (UNLIKELY(leader_ == HostAddr("", 0))) { + // I don't know who is the leader, will accept it as new leader + break; + } } } while (false); } @@ -1606,16 +1611,13 @@ cpp2::ErrorCode RaftPart::verifyLeader(const REQ& req) { TermID oldTerm = term_; // Ok, no reason to refuse, just follow the leader LOG(INFO) << idStr_ << "The current role is " << roleStr(role_) << ". Will follow the new leader " - << req.get_leader_addr() << ":" << req.get_leader_port() - << " [Term: " << req.get_current_term() << "]"; + << peer << " on term " << req.get_current_term(); if (role_ != Role::LEARNER) { role_ = Role::FOLLOWER; } - leader_ = candidate; - term_ = proposedTerm_ = req.get_current_term(); - votedAddr_ = HostAddr("", 0); - weight_ = 1; + leader_ = peer; + term_ = req.get_current_term(); isBlindFollower_ = false; // Before accept the logs from the new leader, check the logs locally. 
if (wal_->lastLogId() > lastLogId_) { @@ -1735,7 +1737,7 @@ void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req, committedLogId_ = req.get_committed_log_id(); lastLogId_ = committedLogId_; lastLogTerm_ = req.get_committed_log_term(); - term_ = proposedTerm_ = lastLogTerm_; + term_ = lastLogTerm_; // there should be no wal after state converts to WAITING_SNAPSHOT, the RaftPart has been reset DCHECK_EQ(wal_->firstLogId(), 0); DCHECK_EQ(wal_->lastLogId(), 0); diff --git a/src/kvstore/raftex/RaftPart.h b/src/kvstore/raftex/RaftPart.h index a67fc0123c6..dfcf818a12a 100644 --- a/src/kvstore/raftex/RaftPart.h +++ b/src/kvstore/raftex/RaftPart.h @@ -344,25 +344,27 @@ class RaftPart : public std::enable_shared_from_this { void cleanupSnapshot(); // The method sends out AskForVote request - // Return true if a leader is elected (the leader could be self or others), - // otherwise returns false - folly::Future leaderElection(); + // Return true if I have been granted majority votes on proposedTerm, no matter isPreVote or not + folly::Future leaderElection(bool isPreVote); // The method will fill up the request object and return TRUE // if the election should continue. 
Otherwise the method will // return FALSE bool prepareElectionRequest(cpp2::AskForVoteRequest& req, - std::vector>& hosts); + std::vector>& hosts, + bool isPreVote); - // return true if elected as the leader, else return false + // Return true if I have been granted majority votes on proposedTerm, no matter isPreVote or not bool handleElectionResponses(const ElectionResponses& resps, const std::vector>& hosts, - TermID proposedTerm); + TermID proposedTerm, + bool isPreVote); - // The method returns the partition's role after the election - Role processElectionResponses(const ElectionResponses& results, + // Return true if I have been granted majority votes on proposedTerm, no matter isPreVote or not + bool processElectionResponses(const ElectionResponses& results, std::vector> hosts, - TermID proposedTerm); + TermID proposedTerm, + bool isPreVote); // Check whether new logs can be appended // Pre-condition: The caller needs to hold the raftLock_ @@ -513,27 +515,15 @@ class RaftPart : public std::enable_shared_from_this { HostAddr leader_; // After voted for somebody, it will not be empty anymore. - // And it will be reset to empty after current election finished. HostAddr votedAddr_; // The current term id // the term id proposed by that candidate TermID term_{0}; - // During normal operation, proposedTerm_ is equal to term_, - // when the partition becomes a candidate, proposedTerm_ will be - // bumped up by 1 every time when sending out the AskForVote - // Request - - // If voted for somebody, the proposeTerm will be reset to the candidate - // propose term. So we could use it to prevent revote if someone else ask for - // vote for current proposedTerm. - - // TODO(heng) We should persist it on the disk in the future - // Otherwise, after restart the whole cluster, maybe the stale - // leader still has the unsend log with larger term, and after other - // replicas elected the new leader, the stale one will not join in the - // Raft group any more. 
- TermID proposedTerm_{0}; + + // Once we have voted some one in formal election, we will set votedTerm_ and votedAddr_. + // To prevent we have voted more than once in a same term + TermID votedTerm_{0}; // The id and term of the last-sent log LogID lastLogId_{0}; @@ -582,8 +572,6 @@ class RaftPart : public std::enable_shared_from_this { // Used to bypass the stale command int64_t startTimeMs_ = 0; - std::atomic weight_; - std::atomic blocking_{false}; }; From a14d7b4192b56d0a7359597bdf755a29044e366a Mon Sep 17 00:00:00 2001 From: "endy.li" <25311962+heroicNeZha@users.noreply.github.com> Date: Thu, 25 Nov 2021 13:34:35 +0800 Subject: [PATCH 48/53] describe the user (#3300) * feat: describe the user * refactor - add syntax desc user xxx * bug - add permission check * style - permissionCheck * fix - user not exist * test - add tck session and cases * style - remove annotation * refact - table style * feat - compatible with pipe * fix - merge conflict * style - format * feat - tck query by common user * refact - remove duplicate interfaces * fix comment --- src/graph/executor/CMakeLists.txt | 1 + src/graph/executor/Executor.cpp | 4 + .../executor/admin/DescribeUserExecutor.cpp | 57 ++++++++++++ .../executor/admin/DescribeUserExecutor.h | 28 ++++++ src/graph/planner/plan/Admin.cpp | 6 ++ src/graph/planner/plan/Admin.h | 17 ++++ src/graph/planner/plan/PlanNode.cpp | 2 + src/graph/planner/plan/PlanNode.h | 1 + src/graph/service/PermissionCheck.cpp | 1 + src/graph/service/PermissionManager.cpp | 19 ++++ src/graph/service/PermissionManager.h | 1 + src/graph/validator/ACLValidator.cpp | 25 ++++++ src/graph/validator/ACLValidator.h | 14 +++ src/graph/validator/Validator.cpp | 2 + src/parser/AdminSentences.cpp | 4 + src/parser/AdminSentences.h | 15 ++++ src/parser/Sentence.h | 1 + src/parser/parser.yy | 12 ++- tests/tck/conftest.py | 6 ++ tests/tck/features/user/User.feature | 86 +++++++++++++++++++ 20 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 
src/graph/executor/admin/DescribeUserExecutor.cpp create mode 100644 src/graph/executor/admin/DescribeUserExecutor.h diff --git a/src/graph/executor/CMakeLists.txt b/src/graph/executor/CMakeLists.txt index b7270c16ff9..5bb8064ce3c 100644 --- a/src/graph/executor/CMakeLists.txt +++ b/src/graph/executor/CMakeLists.txt @@ -51,6 +51,7 @@ nebula_add_library( admin/ChangePasswordExecutor.cpp admin/ListUserRolesExecutor.cpp admin/ListUsersExecutor.cpp + admin/DescribeUserExecutor.cpp admin/ListRolesExecutor.cpp admin/SubmitJobExecutor.cpp admin/ShowHostsExecutor.cpp diff --git a/src/graph/executor/Executor.cpp b/src/graph/executor/Executor.cpp index 27a6985e620..447e25acb31 100644 --- a/src/graph/executor/Executor.cpp +++ b/src/graph/executor/Executor.cpp @@ -20,6 +20,7 @@ #include "graph/executor/admin/CharsetExecutor.h" #include "graph/executor/admin/ConfigExecutor.h" #include "graph/executor/admin/CreateUserExecutor.h" +#include "graph/executor/admin/DescribeUserExecutor.h" #include "graph/executor/admin/DownloadExecutor.h" #include "graph/executor/admin/DropUserExecutor.h" #include "graph/executor/admin/GrantRoleExecutor.h" @@ -385,6 +386,9 @@ Executor *Executor::makeExecutor(QueryContext *qctx, const PlanNode *node) { case PlanNode::Kind::kListRoles: { return pool->add(new ListRolesExecutor(node, qctx)); } + case PlanNode::Kind::kDescribeUser: { + return pool->add(new DescribeUserExecutor(node, qctx)); + } case PlanNode::Kind::kShowConfigs: { return pool->add(new ShowConfigsExecutor(node, qctx)); } diff --git a/src/graph/executor/admin/DescribeUserExecutor.cpp b/src/graph/executor/admin/DescribeUserExecutor.cpp new file mode 100644 index 00000000000..9b2d893b7f8 --- /dev/null +++ b/src/graph/executor/admin/DescribeUserExecutor.cpp @@ -0,0 +1,57 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#include "graph/executor/admin/DescribeUserExecutor.h" + +#include + +#include "graph/context/QueryContext.h" +#include "graph/planner/plan/Admin.h" +#include "interface/gen-cpp2/meta_types.h" + +namespace nebula { +namespace graph { + +folly::Future DescribeUserExecutor::execute() { + SCOPED_TIMER(&execTime_); + return describeUser(); +} + +folly::Future DescribeUserExecutor::describeUser() { + auto* duNode = asNode(node()); + return qctx() + ->getMetaClient() + ->getUserRoles(*duNode->username()) + .via(runner()) + .thenValue([this](StatusOr>&& resp) { + SCOPED_TIMER(&execTime_); + if (!resp.ok()) { + return std::move(resp).status(); + } + + DataSet v({"role", "space"}); + auto roleItemList = std::move(resp).value(); + for (auto& item : roleItemList) { + if (item.get_space_id() == 0) { + v.emplace_back( + nebula::Row({apache::thrift::util::enumNameSafe(item.get_role_type()), ""})); + } else { + auto spaceNameResult = qctx_->schemaMng()->toGraphSpaceName(item.get_space_id()); + if (spaceNameResult.ok()) { + v.emplace_back(nebula::Row({apache::thrift::util::enumNameSafe(item.get_role_type()), + spaceNameResult.value()})); + } else { + LOG(ERROR) << " Space name of " << item.get_space_id() << " no found"; + return Status::Error("Space not found"); + } + } + } + return finish( + ResultBuilder().value(Value(std::move(v))).iter(Iterator::Kind::kSequential).build()); + }); +} + +} // namespace graph +} // namespace nebula diff --git a/src/graph/executor/admin/DescribeUserExecutor.h b/src/graph/executor/admin/DescribeUserExecutor.h new file mode 100644 index 00000000000..629e1cb7854 --- /dev/null +++ b/src/graph/executor/admin/DescribeUserExecutor.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2020 vesoft inc. All rights reserved. + * + * This source code is licensed under Apache 2.0 License. 
+ */ + +#ifndef GRAPH_EXECUTOR_ADMIN_DESCRIBEUSEREXECUTOR_H_ +#define GRAPH_EXECUTOR_ADMIN_DESCRIBEUSEREXECUTOR_H_ + +#include "graph/executor/Executor.h" + +namespace nebula { +namespace graph { + +class DescribeUserExecutor final : public Executor { + public: + DescribeUserExecutor(const PlanNode *node, QueryContext *qctx) + : Executor("DescribeUsersExecutor", node, qctx) {} + + folly::Future execute() override; + + private: + folly::Future describeUser(); +}; + +} // namespace graph +} // namespace nebula + +#endif // GRAPH_EXECUTOR_ADMIN_DESCRIBEUSEREXECUTOR_H_ diff --git a/src/graph/planner/plan/Admin.cpp b/src/graph/planner/plan/Admin.cpp index 7361c531163..e48793939fb 100644 --- a/src/graph/planner/plan/Admin.cpp +++ b/src/graph/planner/plan/Admin.cpp @@ -136,6 +136,12 @@ std::unique_ptr ChangePassword::explain() const { return desc; } +std::unique_ptr DescribeUser::explain() const { + auto desc = SingleDependencyNode::explain(); + addDescription("username", *username_, desc.get()); + return desc; +} + std::unique_ptr ListUserRoles::explain() const { auto desc = SingleDependencyNode::explain(); addDescription("username", *username_, desc.get()); diff --git a/src/graph/planner/plan/Admin.h b/src/graph/planner/plan/Admin.h index 998fdea1fea..1f5b4c24215 100644 --- a/src/graph/planner/plan/Admin.h +++ b/src/graph/planner/plan/Admin.h @@ -653,6 +653,23 @@ class ListUsers final : public SingleDependencyNode { : SingleDependencyNode(qctx, Kind::kListUsers, dep) {} }; +class DescribeUser final : public SingleDependencyNode { + public: + static DescribeUser* make(QueryContext* qctx, PlanNode* dep, const std::string* username) { + return qctx->objPool()->add(new DescribeUser(qctx, dep, username)); + } + + std::unique_ptr explain() const override; + + const std::string* username() const { return username_; } + + private: + explicit DescribeUser(QueryContext* qctx, PlanNode* dep, const std::string* username) + : SingleDependencyNode(qctx, Kind::kDescribeUser, dep), 
username_(username) {} + + const std::string* username_; +}; + class ListRoles final : public SingleDependencyNode { public: static ListRoles* make(QueryContext* qctx, PlanNode* dep, GraphSpaceID space) { diff --git a/src/graph/planner/plan/PlanNode.cpp b/src/graph/planner/plan/PlanNode.cpp index 388c08afab5..b1a43a2836c 100644 --- a/src/graph/planner/plan/PlanNode.cpp +++ b/src/graph/planner/plan/PlanNode.cpp @@ -148,6 +148,8 @@ const char* PlanNode::toString(PlanNode::Kind kind) { return "ListUserRoles"; case Kind::kListUsers: return "ListUsers"; + case Kind::kDescribeUser: + return "DescribeUser"; case Kind::kListRoles: return "ListRoles"; case Kind::kShowCreateSpace: diff --git a/src/graph/planner/plan/PlanNode.h b/src/graph/planner/plan/PlanNode.h index a5fdc3b5918..053272ce01b 100644 --- a/src/graph/planner/plan/PlanNode.h +++ b/src/graph/planner/plan/PlanNode.h @@ -121,6 +121,7 @@ class PlanNode { kListUserRoles, kListUsers, kListRoles, + kDescribeUser, // Snapshot kCreateSnapshot, diff --git a/src/graph/service/PermissionCheck.cpp b/src/graph/service/PermissionCheck.cpp index c0752634f3e..ccc3ebdcf36 100644 --- a/src/graph/service/PermissionCheck.cpp +++ b/src/graph/service/PermissionCheck.cpp @@ -177,6 +177,7 @@ Status PermissionCheck::permissionCheck(ClientSession *session, case Sentence::Kind::kShowRoles: { return PermissionManager::canReadSpace(session, targetSpace); } + case Sentence::Kind::kDescribeUser: case Sentence::Kind::kShowUsers: case Sentence::Kind::kShowSnapshots: case Sentence::Kind::kShowTSClients: diff --git a/src/graph/service/PermissionManager.cpp b/src/graph/service/PermissionManager.cpp index a0ea92b5229..80ce3153a53 100644 --- a/src/graph/service/PermissionManager.cpp +++ b/src/graph/service/PermissionManager.cpp @@ -113,6 +113,25 @@ Status PermissionManager::canWriteUser(ClientSession *session) { } } +// static +Status PermissionManager::canReadUser(ClientSession *session, const std::string &targetUser) { + if 
(!FLAGS_enable_authorize) { + return Status::OK(); + } + if (FLAGS_auth_type == "cloud") { + return Status::PermissionError("Cloud authenticate user can't read user."); + } + if (session->isGod()) { + return Status::OK(); + } + + if (session->user() == targetUser) { + return Status::OK(); + } + + return Status::PermissionError("No permission to read user `%s'.", targetUser.c_str()); +} + Status PermissionManager::canWriteRole(ClientSession *session, meta::cpp2::RoleType targetRole, GraphSpaceID spaceId, diff --git a/src/graph/service/PermissionManager.h b/src/graph/service/PermissionManager.h index 335e8d7fdaf..655b8dd420f 100644 --- a/src/graph/service/PermissionManager.h +++ b/src/graph/service/PermissionManager.h @@ -24,6 +24,7 @@ class PermissionManager final { static Status canWriteSpace(ClientSession *session); static Status canWriteSchema(ClientSession *session, ValidateContext *vctx); static Status canWriteUser(ClientSession *session); + static Status canReadUser(ClientSession *session, const std::string &targetUser); static Status canWriteRole(ClientSession *session, meta::cpp2::RoleType targetRole, GraphSpaceID spaceId, diff --git a/src/graph/validator/ACLValidator.cpp b/src/graph/validator/ACLValidator.cpp index 9fd45da9523..3f570f2e67a 100644 --- a/src/graph/validator/ACLValidator.cpp +++ b/src/graph/validator/ACLValidator.cpp @@ -131,6 +131,31 @@ Status RevokeRoleValidator::toPlan() { sentence->getAclItemClause()->getRoleType()); } +// describe user +Status DescribeUserValidator::validateImpl() { + auto sentence = static_cast(sentence_); + if (sentence->account()->size() > kUsernameMaxLength) { + return Status::SemanticError("Username exceed maximum length %ld characters.", + kUsernameMaxLength); + } + if (!inputs_.empty()) { + return Status::SemanticError("Show queries sentence do not support input"); + } + outputs_.emplace_back("role", Value::Type::STRING); + outputs_.emplace_back("space", Value::Type::STRING); + return Status::OK(); +} + +Status 
DescribeUserValidator::checkPermission() { + auto sentence = static_cast(sentence_); + return PermissionManager::canReadUser(qctx_->rctx()->session(), *sentence->account()); +} + +Status DescribeUserValidator::toPlan() { + auto sentence = static_cast(sentence_); + return genSingleNodePlan(sentence->account()); +} + // show roles in space Status ShowRolesInSpaceValidator::validateImpl() { auto sentence = static_cast(sentence_); diff --git a/src/graph/validator/ACLValidator.h b/src/graph/validator/ACLValidator.h index bf88439383c..616d6bc4eed 100644 --- a/src/graph/validator/ACLValidator.h +++ b/src/graph/validator/ACLValidator.h @@ -97,6 +97,20 @@ class RevokeRoleValidator final : public Validator { Status toPlan() override; }; +class DescribeUserValidator final : public Validator { + public: + DescribeUserValidator(Sentence* sentence, QueryContext* context) : Validator(sentence, context) { + setNoSpaceRequired(); + } + + private: + Status validateImpl() override; + + Status checkPermission() override; + + Status toPlan() override; +}; + class ShowRolesInSpaceValidator final : public Validator { public: ShowRolesInSpaceValidator(Sentence* sentence, QueryContext* context) diff --git a/src/graph/validator/Validator.cpp b/src/graph/validator/Validator.cpp index f7e2cb79816..687d4731492 100644 --- a/src/graph/validator/Validator.cpp +++ b/src/graph/validator/Validator.cpp @@ -131,6 +131,8 @@ std::unique_ptr Validator::makeValidator(Sentence* sentence, QueryCon return std::make_unique(sentence, context); case Sentence::Kind::kShowRoles: return std::make_unique(sentence, context); + case Sentence::Kind::kDescribeUser: + return std::make_unique(sentence, context); case Sentence::Kind::kAdminJob: case Sentence::Kind::kAdminShowJobs: return std::make_unique(sentence, context); diff --git a/src/parser/AdminSentences.cpp b/src/parser/AdminSentences.cpp index 54f4f29dea2..4e3cfafc281 100644 --- a/src/parser/AdminSentences.cpp +++ b/src/parser/AdminSentences.cpp @@ -27,6 +27,10 
@@ std::string ShowPartsSentence::toString() const { return std::string("SHOW PARTS std::string ShowUsersSentence::toString() const { return std::string("SHOW USERS"); } +std::string DescribeUserSentence::toString() const { + return folly::stringPrintf("DESCRIBE USER %s", account_.get()->c_str()); +} + std::string ShowRolesSentence::toString() const { return folly::stringPrintf("SHOW ROLES IN %s", name_.get()->c_str()); } diff --git a/src/parser/AdminSentences.h b/src/parser/AdminSentences.h index f0f0af9500f..c08c738d39f 100644 --- a/src/parser/AdminSentences.h +++ b/src/parser/AdminSentences.h @@ -80,6 +80,21 @@ class ShowUsersSentence : public Sentence { std::string toString() const override; }; +class DescribeUserSentence : public Sentence { + public: + explicit DescribeUserSentence(std::string* account) { + account_.reset(account); + kind_ = Kind::kDescribeUser; + } + + std::string toString() const override; + + const std::string* account() const { return account_.get(); } + + private: + std::unique_ptr account_; +}; + class ShowRolesSentence : public Sentence { public: explicit ShowRolesSentence(std::string* name) { diff --git a/src/parser/Sentence.h b/src/parser/Sentence.h index ade75177b17..e943f8ed24a 100644 --- a/src/parser/Sentence.h +++ b/src/parser/Sentence.h @@ -79,6 +79,7 @@ class Sentence { kShowStats, kShowTSClients, kShowFTIndexes, + kDescribeUser, kDeleteVertices, kDeleteTags, kDeleteEdges, diff --git a/src/parser/parser.yy b/src/parser/parser.yy index a30907dd26e..3a5aab1f84b 100644 --- a/src/parser/parser.yy +++ b/src/parser/parser.yy @@ -358,7 +358,7 @@ static constexpr size_t kCommentLengthLimit = 256; %type add_listener_sentence remove_listener_sentence list_listener_sentence %type admin_job_sentence -%type create_user_sentence alter_user_sentence drop_user_sentence change_password_sentence +%type create_user_sentence alter_user_sentence drop_user_sentence change_password_sentence describe_user_sentence %type show_queries_sentence 
kill_query_sentence %type show_sentence @@ -2436,6 +2436,15 @@ column_property } ; +describe_user_sentence + : KW_DESCRIBE KW_USER name_label { + $$ = new DescribeUserSentence($3); + } + | KW_DESC KW_USER name_label { + $$ = new DescribeUserSentence($3); + } + ; + describe_tag_sentence : KW_DESCRIBE KW_TAG name_label { $$ = new DescribeTagSentence($3); @@ -2685,6 +2694,7 @@ traverse_sentence | delete_edge_sentence { $$ = $1; } | show_queries_sentence { $$ = $1; } | kill_query_sentence { $$ = $1; } + | describe_user_sentence { $$ = $1; } ; piped_sentence diff --git a/tests/tck/conftest.py b/tests/tck/conftest.py index 5ddcaddfcd4..637e1e1db32 100644 --- a/tests/tck/conftest.py +++ b/tests/tck/conftest.py @@ -213,6 +213,12 @@ def executing_query(query, graph_spaces, session, request): ngql = combine_query(query) exec_query(request, ngql, session, graph_spaces) +@when(parse("executing query with user {username} with password {password}:\n{query}")) +def executing_query(username, password, conn_pool_to_first_graph_service, query, graph_spaces, request): + sess = conn_pool_to_first_graph_service.get_session(username, password) + ngql = combine_query(query) + exec_query(request, ngql, sess, graph_spaces) + sess.release() @when(parse("profiling query:\n{query}")) def profiling_query(query, graph_spaces, session, request): diff --git a/tests/tck/features/user/User.feature b/tests/tck/features/user/User.feature index 5be998fb107..e66bb795bfa 100644 --- a/tests/tck/features/user/User.feature +++ b/tests/tck/features/user/User.feature @@ -280,3 +280,89 @@ Feature: User & privilege Test DROP SPACE user_tmp_space_3; """ Then the execution should be successful + + Scenario: Describe User + When executing query: + """ + CREATE SPACE IF NOT EXISTS user_tmp_space_4(partition_num=1, replica_factor=1, vid_type=FIXED_STRING(8)) + """ + Then the execution should be successful + And wait 10 seconds + When executing query: + """ + CREATE USER IF NOT EXISTS user1 WITH PASSWORD "pwd1" + 
""" + Then the execution should be successful + When executing query: + """ + GRANT ROLE ADMIN ON user_tmp_space_4 TO user1 + """ + Then the execution should be successful + When executing query: + """ + CREATE USER IF NOT EXISTS user2 WITH PASSWORD "pwd2" + """ + Then the execution should be successful + When executing query: + """ + GRANT ROLE ADMIN ON user_tmp_space_4 TO user2 + """ + Then the execution should be successful + When executing query: + """ + SHOW USERS + """ + Then the result should contain: + | Account | + | "root" | + | "user1" | + | "user2" | + When executing query: + """ + DESC USER root + """ + Then the result should be, in any order, with relax comparison: + | role | space | + | "GOD" | "" | + When executing query: + """ + DESC USER user1 + """ + Then the result should be, in any order, with relax comparison: + | role | space | + | "ADMIN" | "user_tmp_space_4" | + When executing query: + """ + DESC USER user1 | YIELD $-.space as sp + """ + Then the result should be, in any order, with relax comparison: + | sp | + | "user_tmp_space_4" | + When executing query: + """ + DESC USER user_not_exist + """ + Then the result should be, in any order, with relax comparison: + | role | space | + When executing query with user user1 with password pwd1: + """ + DESC USER user1 + """ + Then the result should be, in any order, with relax comparison: + | role | space | + | "ADMIN" | "user_tmp_space_4" | + When executing query with user user1 with password pwd1: + """ + DESC USER user2 + """ + Then a PermissionError should be raised at runtime: + When executing query: + """ + GRANT ROLE GUEST ON user_tmp_space_4 TO user1 + """ + Then the execution should be successful + When executing query with user user1 with password pwd1: + """ + DESC USER root + """ + Then a PermissionError should be raised at runtime: From ddb9ea4f4caacb882c2b6f172cb88f43172f3ea3 Mon Sep 17 00:00:00 2001 From: "lionel.liu@vesoft.com" <52276794+liuyu85cn@users.noreply.github.com> Date: 
Fri, 26 Nov 2021 09:55:12 +0800 Subject: [PATCH 49/53] [fix] can't show edge after clone space (#3351) * fix a bug may stuck * update * create space as can't show edge * add tck test case * do fmt tck test --- src/meta/processors/parts/CreateSpaceAsProcessor.cpp | 2 +- tests/tck/features/schema/CreateSpaceAs.feature | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/meta/processors/parts/CreateSpaceAsProcessor.cpp b/src/meta/processors/parts/CreateSpaceAsProcessor.cpp index 1341868dfd3..f6b27138b82 100644 --- a/src/meta/processors/parts/CreateSpaceAsProcessor.cpp +++ b/src/meta/processors/parts/CreateSpaceAsProcessor.cpp @@ -182,7 +182,7 @@ ErrorOr> CreateSpaceAsProcesso auto edgeType = MetaKeyUtils::parseEdgeType(iter->key()); auto edgeNameLen = *reinterpret_cast(val.data()); auto edgeName = val.subpiece(sizeof(int32_t), edgeNameLen).str(); - data.emplace_back(MetaKeyUtils::indexTagKey(newSpaceId, edgeName), + data.emplace_back(MetaKeyUtils::indexEdgeKey(newSpaceId, edgeName), std::string(reinterpret_cast(&edgeType), sizeof(edgeType))); auto ver = MetaKeyUtils::parseEdgeVersion(iter->key()); diff --git a/tests/tck/features/schema/CreateSpaceAs.feature b/tests/tck/features/schema/CreateSpaceAs.feature index 9398dd86bc7..f5042a3833e 100644 --- a/tests/tck/features/schema/CreateSpaceAs.feature +++ b/tests/tck/features/schema/CreateSpaceAs.feature @@ -91,6 +91,13 @@ Feature: Create space as another space Then the result should be, in any order: | Name | | "e1" | + When executing query: + """ + show create edge e1; + """ + Then the result should be, in any order: + | Edge | Create Edge | + | "e1" | 'CREATE EDGE `e1` (\n `col1` int64 NULL\n) ttl_duration = 0, ttl_col = ""' | When executing query: """ show tag indexes; From e1b5ef08901a7adda66f5a46c4a5d20296bd1850 Mon Sep 17 00:00:00 2001 From: Doodle <13706157+critical27@users.noreply.github.com> Date: Thu, 25 Nov 2021 22:01:39 -0600 Subject: [PATCH 50/53] enable kv interface in 2.0 (#3282) * 
enable kv interface in 2.0 * rebased, fix SimpleKVVerifyTool and StorageIntegrityTool * address @yixinglu's comments, rename GraphStorageClient to StorageClient --- src/clients/storage/CMakeLists.txt | 10 +- src/clients/storage/GeneralStorageClient.cpp | 90 -------- src/clients/storage/GeneralStorageClient.h | 50 ----- ...aphStorageClient.cpp => StorageClient.cpp} | 145 +++++++++---- .../{GraphStorageClient.h => StorageClient.h} | 26 ++- src/daemons/CMakeLists.txt | 4 +- src/graph/context/QueryContext.cpp | 2 +- src/graph/context/QueryContext.h | 10 +- src/graph/executor/mutate/DeleteExecutor.cpp | 8 +- src/graph/executor/mutate/InsertExecutor.cpp | 6 +- src/graph/executor/mutate/UpdateExecutor.cpp | 6 +- .../executor/query/AppendVerticesExecutor.cpp | 12 +- src/graph/executor/query/GetEdgesExecutor.cpp | 12 +- .../executor/query/GetNeighborsExecutor.cpp | 14 +- .../executor/query/GetVerticesExecutor.cpp | 12 +- .../executor/query/IndexScanExecutor.cpp | 12 +- src/graph/executor/query/IndexScanExecutor.h | 2 +- src/graph/executor/query/TraverseExecutor.cpp | 14 +- src/graph/executor/query/TraverseExecutor.h | 2 +- src/graph/executor/test/CMakeLists.txt | 2 +- src/graph/planner/test/CMakeLists.txt | 2 +- src/graph/service/GraphService.cpp | 2 +- src/graph/service/QueryEngine.cpp | 2 +- src/graph/service/QueryEngine.h | 4 +- src/interface/CMakeLists.txt | 2 +- src/interface/storage.thrift | 101 +++++---- src/meta/CMakeLists.txt | 3 +- src/mock/MockCluster.cpp | 27 +-- src/mock/MockCluster.h | 13 +- src/storage/CMakeLists.txt | 5 - src/storage/CompactionFilter.h | 7 - src/storage/GeneralStorageServiceHandler.cpp | 45 ---- src/storage/GeneralStorageServiceHandler.h | 33 --- src/storage/GraphStorageServiceHandler.cpp | 24 +++ src/storage/GraphStorageServiceHandler.h | 6 + src/storage/StorageServer.cpp | 7 +- src/storage/test/CMakeLists.txt | 4 +- src/storage/test/KVClientTest.cpp | 6 +- src/storage/test/KVTest.cpp | 2 +- src/storage/test/StorageClientTest.cpp | 2 +- 
src/storage/test/StorageLookupBenchmark.cpp | 2 +- src/storage/test/TossTestExecutor.h | 2 +- src/storage/test/TossTestUtils.h | 2 +- src/tools/CMakeLists.txt | 2 +- src/tools/simple-kv-verify/CMakeLists.txt | 19 +- .../simple-kv-verify/SimpleKVVerifyTool.cpp | 17 +- src/tools/storage-perf/CMakeLists.txt | 2 +- .../storage-perf/StorageIntegrityTool.cpp | 197 ++++++------------ src/tools/storage-perf/StoragePerfTool.cpp | 26 +-- 49 files changed, 387 insertions(+), 618 deletions(-) delete mode 100644 src/clients/storage/GeneralStorageClient.cpp delete mode 100644 src/clients/storage/GeneralStorageClient.h rename src/clients/storage/{GraphStorageClient.cpp => StorageClient.cpp} (83%) rename src/clients/storage/{GraphStorageClient.h => StorageClient.h} (84%) delete mode 100644 src/storage/GeneralStorageServiceHandler.cpp delete mode 100644 src/storage/GeneralStorageServiceHandler.h diff --git a/src/clients/storage/CMakeLists.txt b/src/clients/storage/CMakeLists.txt index 693f412f2cb..c6df1f8dda6 100644 --- a/src/clients/storage/CMakeLists.txt +++ b/src/clients/storage/CMakeLists.txt @@ -3,14 +3,8 @@ # This source code is licensed under Apache 2.0 License. nebula_add_library( - graph_storage_client_obj OBJECT - GraphStorageClient.cpp -) - - -nebula_add_library( - general_storage_client_obj OBJECT - GeneralStorageClient.cpp + storage_client_obj OBJECT + StorageClient.cpp ) diff --git a/src/clients/storage/GeneralStorageClient.cpp b/src/clients/storage/GeneralStorageClient.cpp deleted file mode 100644 index 03a1fbb327f..00000000000 --- a/src/clients/storage/GeneralStorageClient.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "clients/storage/GeneralStorageClient.h" - -#include "common/base/Base.h" - -namespace nebula { -namespace storage { - -folly::SemiFuture> GeneralStorageClient::get( - GraphSpaceID space, std::vector&& keys, bool returnPartly, folly::EventBase* evb) { - auto status = clusterIdsToHosts( - space, std::move(keys), [](const std::string& v) -> const std::string& { return v; }); - - if (!status.ok()) { - return folly::makeFuture>( - std::runtime_error(status.status().toString())); - } - - auto& clusters = status.value(); - std::unordered_map requests; - for (auto& c : clusters) { - auto& host = c.first; - auto& req = requests[host]; - req.set_space_id(space); - req.set_parts(std::move(c.second)); - req.set_return_partly(returnPartly); - } - - return collectResponse(evb, - std::move(requests), - [](cpp2::GeneralStorageServiceAsyncClient* client, - const cpp2::KVGetRequest& r) { return client->future_get(r); }); -} - -folly::SemiFuture> GeneralStorageClient::put( - GraphSpaceID space, std::vector kvs, folly::EventBase* evb) { - auto status = clusterIdsToHosts( - space, std::move(kvs), [](const KeyValue& v) -> const std::string& { return v.key; }); - - if (!status.ok()) { - return folly::makeFuture>( - std::runtime_error(status.status().toString())); - } - - auto& clusters = status.value(); - std::unordered_map requests; - for (auto& c : clusters) { - auto& host = c.first; - auto& req = requests[host]; - req.set_space_id(space); - req.set_parts(std::move(c.second)); - } - - return collectResponse(evb, - std::move(requests), - [](cpp2::GeneralStorageServiceAsyncClient* client, - const cpp2::KVPutRequest& r) { return client->future_put(r); }); -} - -folly::SemiFuture> GeneralStorageClient::remove( - GraphSpaceID space, std::vector keys, folly::EventBase* evb) { - auto status = clusterIdsToHosts( - space, std::move(keys), [](const std::string& v) -> const std::string& { return v; }); - - if (!status.ok()) { - return folly::makeFuture>( - 
std::runtime_error(status.status().toString())); - } - - auto& clusters = status.value(); - std::unordered_map requests; - for (auto& c : clusters) { - auto& host = c.first; - auto& req = requests[host]; - req.set_space_id(space); - req.set_parts(std::move(c.second)); - } - - return collectResponse(evb, - std::move(requests), - [](cpp2::GeneralStorageServiceAsyncClient* client, - const cpp2::KVRemoveRequest& r) { return client->future_remove(r); }); -} - -} // namespace storage -} // namespace nebula diff --git a/src/clients/storage/GeneralStorageClient.h b/src/clients/storage/GeneralStorageClient.h deleted file mode 100644 index 77d21608c8c..00000000000 --- a/src/clients/storage/GeneralStorageClient.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. - */ - -#ifndef CLIENTS_STORAGE_GENERALSTORAGECLIENT_H_ -#define CLIENTS_STORAGE_GENERALSTORAGECLIENT_H_ - -#include - -#include "clients/meta/MetaClient.h" -#include "clients/storage/StorageClientBase.h" -#include "common/base/Base.h" -#include "common/datatypes/KeyValue.h" -#include "interface/gen-cpp2/GeneralStorageServiceAsyncClient.h" - -namespace nebula { -namespace storage { - -/** - * A wrapper class for GeneralStorageServiceAsyncClient thrift API - * - * The class is NOT reentrant - */ -class GeneralStorageClient : public StorageClientBase { - using Parent = StorageClientBase; - - public: - GeneralStorageClient(std::shared_ptr ioThreadPool, - meta::MetaClient* metaClient) - : Parent(ioThreadPool, metaClient) {} - virtual ~GeneralStorageClient() {} - - folly::SemiFuture> get(GraphSpaceID space, - std::vector&& keys, - bool returnPartly = false, - folly::EventBase* evb = nullptr); - - folly::SemiFuture> put(GraphSpaceID space, - std::vector kvs, - folly::EventBase* evb = nullptr); - - folly::SemiFuture> remove(GraphSpaceID space, - std::vector keys, - folly::EventBase* evb = nullptr); -}; - -} // namespace 
storage -} // namespace nebula -#endif // CLIENTS_STORAGE_GENERALSTORAGECLIENT_H_ diff --git a/src/clients/storage/GraphStorageClient.cpp b/src/clients/storage/StorageClient.cpp similarity index 83% rename from src/clients/storage/GraphStorageClient.cpp rename to src/clients/storage/StorageClient.cpp index 85af874cc19..462dcefc367 100644 --- a/src/clients/storage/GraphStorageClient.cpp +++ b/src/clients/storage/StorageClient.cpp @@ -3,7 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" @@ -15,12 +15,12 @@ using nebula::storage::cpp2::GetPropResponse; namespace nebula { namespace storage { -GraphStorageClient::CommonRequestParam::CommonRequestParam(GraphSpaceID space_, - SessionID sess, - ExecutionPlanID plan_, - bool profile_, - bool experimental, - folly::EventBase* evb_) +StorageClient::CommonRequestParam::CommonRequestParam(GraphSpaceID space_, + SessionID sess, + ExecutionPlanID plan_, + bool profile_, + bool experimental, + folly::EventBase* evb_) : space(space_), session(sess), plan(plan_), @@ -28,7 +28,7 @@ GraphStorageClient::CommonRequestParam::CommonRequestParam(GraphSpaceID space_, useExperimentalFeature(experimental), evb(evb_) {} -cpp2::RequestCommon GraphStorageClient::CommonRequestParam::toReqCommon() const { +cpp2::RequestCommon StorageClient::CommonRequestParam::toReqCommon() const { cpp2::RequestCommon common; common.set_session_id(session); common.set_plan_id(plan); @@ -36,7 +36,7 @@ cpp2::RequestCommon GraphStorageClient::CommonRequestParam::toReqCommon() const return common; } -StorageRpcRespFuture GraphStorageClient::getNeighbors( +StorageRpcRespFuture StorageClient::getNeighbors( const CommonRequestParam& param, std::vector colNames, const std::vector& vertices, @@ -108,7 +108,7 @@ StorageRpcRespFuture GraphStorageClient::getNeighbor }); } -StorageRpcRespFuture GraphStorageClient::addVertices( 
+StorageRpcRespFuture StorageClient::addVertices( const CommonRequestParam& param, std::vector vertices, std::unordered_map> propNames, @@ -146,11 +146,10 @@ StorageRpcRespFuture GraphStorageClient::addVertices( }); } -StorageRpcRespFuture GraphStorageClient::addEdges( - const CommonRequestParam& param, - std::vector edges, - std::vector propNames, - bool ifNotExists) { +StorageRpcRespFuture StorageClient::addEdges(const CommonRequestParam& param, + std::vector edges, + std::vector propNames, + bool ifNotExists) { auto cbStatus = getIdFromNewEdge(param.space); if (!cbStatus.ok()) { return folly::makeFuture>( @@ -184,7 +183,7 @@ StorageRpcRespFuture GraphStorageClient::addEdges( }); } -StorageRpcRespFuture GraphStorageClient::getProps( +StorageRpcRespFuture StorageClient::getProps( const CommonRequestParam& param, const DataSet& input, const std::vector* vertexProps, @@ -240,7 +239,7 @@ StorageRpcRespFuture GraphStorageClient::getProps( const cpp2::GetPropRequest& r) { return client->future_getProps(r); }); } -StorageRpcRespFuture GraphStorageClient::deleteEdges( +StorageRpcRespFuture StorageClient::deleteEdges( const CommonRequestParam& param, std::vector edges) { auto cbStatus = getIdFromEdgeKey(param.space); if (!cbStatus.ok()) { @@ -273,7 +272,7 @@ StorageRpcRespFuture GraphStorageClient::deleteEdges( }); } -StorageRpcRespFuture GraphStorageClient::deleteVertices( +StorageRpcRespFuture StorageClient::deleteVertices( const CommonRequestParam& param, std::vector ids) { auto cbStatus = getIdFromValue(param.space); if (!cbStatus.ok()) { @@ -306,7 +305,7 @@ StorageRpcRespFuture GraphStorageClient::deleteVertices( }); } -StorageRpcRespFuture GraphStorageClient::deleteTags( +StorageRpcRespFuture StorageClient::deleteTags( const CommonRequestParam& param, std::vector delTags) { auto cbStatus = getIdFromDelTags(param.space); if (!cbStatus.ok()) { @@ -339,7 +338,7 @@ StorageRpcRespFuture GraphStorageClient::deleteTags( }); } -folly::Future> 
GraphStorageClient::updateVertex( +folly::Future> StorageClient::updateVertex( const CommonRequestParam& param, Value vertexId, TagID tagId, @@ -393,7 +392,7 @@ folly::Future> GraphStorageClient::updat }); } -folly::Future> GraphStorageClient::updateEdge( +folly::Future> StorageClient::updateEdge( const CommonRequestParam& param, storage::cpp2::EdgeKey edgeKey, std::vector updatedProps, @@ -448,9 +447,9 @@ folly::Future> GraphStorageClient::updat }); } -folly::Future> GraphStorageClient::getUUID(GraphSpaceID space, - const std::string& name, - folly::EventBase* evb) { +folly::Future> StorageClient::getUUID(GraphSpaceID space, + const std::string& name, + folly::EventBase* evb) { std::pair request; DCHECK(!!metaClient_); auto status = metaClient_->partsNum(space); @@ -482,7 +481,7 @@ folly::Future> GraphStorageClient::getUUID(GraphSpac }); } -StorageRpcRespFuture GraphStorageClient::lookupIndex( +StorageRpcRespFuture StorageClient::lookupIndex( const CommonRequestParam& param, const std::vector& contexts, bool isEdge, @@ -529,7 +528,7 @@ StorageRpcRespFuture GraphStorageClient::lookupIndex( }); } -StorageRpcRespFuture GraphStorageClient::lookupAndTraverse( +StorageRpcRespFuture StorageClient::lookupAndTraverse( const CommonRequestParam& param, cpp2::IndexSpec indexSpec, cpp2::TraverseSpec traverseSpec) { auto space = param.space; auto status = getHostParts(space); @@ -559,7 +558,7 @@ StorageRpcRespFuture GraphStorageClient::lookupAndTr }); } -StorageRpcRespFuture GraphStorageClient::scanEdge( +StorageRpcRespFuture StorageClient::scanEdge( const CommonRequestParam& param, const cpp2::EdgeProp& edgeProp, int64_t limit, @@ -590,7 +589,7 @@ StorageRpcRespFuture GraphStorageClient::scanEdge( const cpp2::ScanEdgeRequest& r) { return client->future_scanEdge(r); }); } -StorageRpcRespFuture GraphStorageClient::scanVertex( +StorageRpcRespFuture StorageClient::scanVertex( const CommonRequestParam& param, const std::vector& vertexProp, int64_t limit, @@ -623,7 +622,83 @@ 
StorageRpcRespFuture GraphStorageClient::scanVertex( }); } -StatusOr> GraphStorageClient::getIdFromRow( +folly::SemiFuture> StorageClient::get( + GraphSpaceID space, std::vector&& keys, bool returnPartly, folly::EventBase* evb) { + auto status = clusterIdsToHosts( + space, std::move(keys), [](const std::string& v) -> const std::string& { return v; }); + + if (!status.ok()) { + return folly::makeFuture>( + std::runtime_error(status.status().toString())); + } + + auto& clusters = status.value(); + std::unordered_map requests; + for (auto& c : clusters) { + auto& host = c.first; + auto& req = requests[host]; + req.set_space_id(space); + req.set_parts(std::move(c.second)); + req.set_return_partly(returnPartly); + } + + return collectResponse(evb, + std::move(requests), + [](cpp2::GraphStorageServiceAsyncClient* client, + const cpp2::KVGetRequest& r) { return client->future_get(r); }); +} + +folly::SemiFuture> StorageClient::put( + GraphSpaceID space, std::vector kvs, folly::EventBase* evb) { + auto status = clusterIdsToHosts( + space, std::move(kvs), [](const KeyValue& v) -> const std::string& { return v.key; }); + + if (!status.ok()) { + return folly::makeFuture>( + std::runtime_error(status.status().toString())); + } + + auto& clusters = status.value(); + std::unordered_map requests; + for (auto& c : clusters) { + auto& host = c.first; + auto& req = requests[host]; + req.set_space_id(space); + req.set_parts(std::move(c.second)); + } + + return collectResponse(evb, + std::move(requests), + [](cpp2::GraphStorageServiceAsyncClient* client, + const cpp2::KVPutRequest& r) { return client->future_put(r); }); +} + +folly::SemiFuture> StorageClient::remove( + GraphSpaceID space, std::vector keys, folly::EventBase* evb) { + auto status = clusterIdsToHosts( + space, std::move(keys), [](const std::string& v) -> const std::string& { return v; }); + + if (!status.ok()) { + return folly::makeFuture>( + std::runtime_error(status.status().toString())); + } + + auto& clusters = 
status.value(); + std::unordered_map requests; + for (auto& c : clusters) { + auto& host = c.first; + auto& req = requests[host]; + req.set_space_id(space); + req.set_parts(std::move(c.second)); + } + + return collectResponse(evb, + std::move(requests), + [](cpp2::GraphStorageServiceAsyncClient* client, + const cpp2::KVRemoveRequest& r) { return client->future_remove(r); }); +} + +StatusOr> StorageClient::getIdFromRow( GraphSpaceID space, bool isEdgeProps) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { @@ -669,8 +744,8 @@ StatusOr> GraphStorageClient::getIdFr return cb; } -StatusOr> -GraphStorageClient::getIdFromNewVertex(GraphSpaceID space) const { +StatusOr> StorageClient::getIdFromNewVertex( + GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { return vidTypeStatus.status(); @@ -696,7 +771,7 @@ GraphStorageClient::getIdFromNewVertex(GraphSpaceID space) const { return cb; } -StatusOr> GraphStorageClient::getIdFromNewEdge( +StatusOr> StorageClient::getIdFromNewEdge( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { @@ -732,7 +807,7 @@ StatusOr> GraphStorageClien return cb; } -StatusOr> GraphStorageClient::getIdFromEdgeKey( +StatusOr> StorageClient::getIdFromEdgeKey( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { @@ -764,7 +839,7 @@ StatusOr> GraphStorageClien return cb; } -StatusOr> GraphStorageClient::getIdFromValue( +StatusOr> StorageClient::getIdFromValue( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { @@ -791,7 +866,7 @@ StatusOr> GraphStorageClient::getId return cb; } -StatusOr> GraphStorageClient::getIdFromDelTags( +StatusOr> StorageClient::getIdFromDelTags( GraphSpaceID space) const { auto vidTypeStatus = metaClient_->getSpaceVidType(space); if (!vidTypeStatus) { diff --git 
a/src/clients/storage/GraphStorageClient.h b/src/clients/storage/StorageClient.h similarity index 84% rename from src/clients/storage/GraphStorageClient.h rename to src/clients/storage/StorageClient.h index 9b917b36add..2261f548b53 100644 --- a/src/clients/storage/GraphStorageClient.h +++ b/src/clients/storage/StorageClient.h @@ -3,8 +3,7 @@ * This source code is licensed under Apache 2.0 License. */ -#ifndef CLIENTS_STORAGE_GRAPHSTORAGECLIENT_H_ -#define CLIENTS_STORAGE_GRAPHSTORAGECLIENT_H_ +#pragma once #include @@ -23,7 +22,7 @@ using StorageRpcRespFuture = folly::SemiFuture>; * * The class is NOT reentrant */ -class GraphStorageClient : public StorageClientBase { +class StorageClient : public StorageClientBase { FRIEND_TEST(StorageClientTest, LeaderChangeTest); public: @@ -45,10 +44,10 @@ class GraphStorageClient : public StorageClientBase ioThreadPool, - meta::MetaClient* metaClient) + StorageClient(std::shared_ptr ioThreadPool, + meta::MetaClient* metaClient) : StorageClientBase(ioThreadPool, metaClient) {} - virtual ~GraphStorageClient() {} + virtual ~StorageClient() {} StorageRpcRespFuture getNeighbors( const CommonRequestParam& param, @@ -141,6 +140,19 @@ class GraphStorageClient : public StorageClientBase> get(GraphSpaceID space, + std::vector&& keys, + bool returnPartly = false, + folly::EventBase* evb = nullptr); + + folly::SemiFuture> put(GraphSpaceID space, + std::vector kvs, + folly::EventBase* evb = nullptr); + + folly::SemiFuture> remove(GraphSpaceID space, + std::vector keys, + folly::EventBase* evb = nullptr); + private: StatusOr> getIdFromRow(GraphSpaceID space, bool isEdgeProps) const; @@ -162,5 +174,3 @@ class GraphStorageClient : public StorageClientBase $ $ - $ + $ $ $ ${common_deps} @@ -127,7 +127,7 @@ nebula_add_executable( $ $ $ - $ + $ $ $ $ diff --git a/src/graph/context/QueryContext.cpp b/src/graph/context/QueryContext.cpp index f70727e9ad0..4fc2c6f65be 100644 --- a/src/graph/context/QueryContext.cpp +++ 
b/src/graph/context/QueryContext.cpp @@ -11,7 +11,7 @@ namespace graph { QueryContext::QueryContext(RequestContextPtr rctx, meta::SchemaManager* sm, meta::IndexManager* im, - storage::GraphStorageClient* storage, + storage::StorageClient* storage, meta::MetaClient* metaClient, CharsetInfo* charsetInfo) : rctx_(std::move(rctx)), diff --git a/src/graph/context/QueryContext.h b/src/graph/context/QueryContext.h index e9cba6cd4a1..00aa8b75336 100644 --- a/src/graph/context/QueryContext.h +++ b/src/graph/context/QueryContext.h @@ -7,7 +7,7 @@ #define GRAPH_CONTEXT_QUERYCONTEXT_H_ #include "clients/meta/MetaClient.h" -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/ObjectPool.h" #include "common/charset/Charset.h" #include "common/cpp/helpers.h" @@ -45,7 +45,7 @@ class QueryContext { QueryContext(RequestContextPtr rctx, meta::SchemaManager* sm, meta::IndexManager* im, - storage::GraphStorageClient* storage, + storage::StorageClient* storage, meta::MetaClient* metaClient, CharsetInfo* charsetInfo); @@ -57,7 +57,7 @@ class QueryContext { void setIndexManager(meta::IndexManager* im) { im_ = im; } - void setStorageClient(storage::GraphStorageClient* storage) { storageClient_ = storage; } + void setStorageClient(storage::StorageClient* storage) { storageClient_ = storage; } void setMetaClient(meta::MetaClient* metaClient) { metaClient_ = metaClient; } @@ -75,7 +75,7 @@ class QueryContext { meta::IndexManager* indexMng() const { return im_; } - storage::GraphStorageClient* getStorageClient() const { return storageClient_; } + storage::StorageClient* getStorageClient() const { return storageClient_; } meta::MetaClient* getMetaClient() const { return metaClient_; } @@ -105,7 +105,7 @@ class QueryContext { std::unique_ptr ep_; meta::SchemaManager* sm_{nullptr}; meta::IndexManager* im_{nullptr}; - storage::GraphStorageClient* storageClient_{nullptr}; + storage::StorageClient* storageClient_{nullptr}; 
meta::MetaClient* metaClient_{nullptr}; CharsetInfo* charsetInfo_{nullptr}; diff --git a/src/graph/executor/mutate/DeleteExecutor.cpp b/src/graph/executor/mutate/DeleteExecutor.cpp index 49fcb6a6830..a98ea20afe8 100644 --- a/src/graph/executor/mutate/DeleteExecutor.cpp +++ b/src/graph/executor/mutate/DeleteExecutor.cpp @@ -11,7 +11,7 @@ #include "graph/planner/plan/Mutate.h" #include "graph/util/SchemaUtil.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; namespace nebula { namespace graph { @@ -63,7 +63,7 @@ folly::Future DeleteVerticesExecutor::deleteVertices() { auto spaceId = spaceInfo.id; time::Duration deleteVertTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( spaceId, qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); return qctx() ->getStorageClient() @@ -119,7 +119,7 @@ folly::Future DeleteTagsExecutor::deleteTags() { auto spaceId = spaceInfo.id; time::Duration deleteTagTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( spaceId, qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); return qctx() ->getStorageClient() @@ -204,7 +204,7 @@ folly::Future DeleteEdgesExecutor::deleteEdges() { auto spaceId = spaceInfo.id; time::Duration deleteEdgeTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( spaceId, qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); return qctx() ->getStorageClient() diff --git a/src/graph/executor/mutate/InsertExecutor.cpp b/src/graph/executor/mutate/InsertExecutor.cpp index 94717c0d6c3..bb8806ade8b 100644 --- a/src/graph/executor/mutate/InsertExecutor.cpp +++ b/src/graph/executor/mutate/InsertExecutor.cpp @@ -10,7 +10,7 @@ #include "graph/planner/plan/Mutate.h" #include "graph/service/GraphFlags.h" -using 
nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; namespace nebula { namespace graph { @@ -23,7 +23,7 @@ folly::Future InsertVerticesExecutor::insertVertices() { auto *ivNode = asNode(node()); time::Duration addVertTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( ivNode->getSpace(), qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); return qctx() ->getStorageClient() @@ -47,7 +47,7 @@ folly::Future InsertEdgesExecutor::insertEdges() { auto *ieNode = asNode(node()); time::Duration addEdgeTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( ieNode->getSpace(), qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); param.useExperimentalFeature = FLAGS_enable_experimental_feature; return qctx() diff --git a/src/graph/executor/mutate/UpdateExecutor.cpp b/src/graph/executor/mutate/UpdateExecutor.cpp index de748c2c266..3abfd089973 100644 --- a/src/graph/executor/mutate/UpdateExecutor.cpp +++ b/src/graph/executor/mutate/UpdateExecutor.cpp @@ -11,7 +11,7 @@ #include "graph/service/GraphFlags.h" #include "graph/util/SchemaUtil.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; namespace nebula { namespace graph { @@ -49,7 +49,7 @@ folly::Future UpdateVertexExecutor::execute() { time::Duration updateVertTime; auto plan = qctx()->plan(); auto sess = qctx()->rctx()->session(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( uvNode->getSpaceId(), sess->id(), plan->id(), plan->isProfileEnabled()); return qctx() ->getStorageClient() @@ -100,7 +100,7 @@ folly::Future UpdateEdgeExecutor::execute() { time::Duration updateEdgeTime; auto plan = qctx()->plan(); - GraphStorageClient::CommonRequestParam param( + StorageClient::CommonRequestParam param( ueNode->getSpaceId(), 
qctx()->rctx()->session()->id(), plan->id(), plan->isProfileEnabled()); param.useExperimentalFeature = FLAGS_enable_experimental_feature; return qctx() diff --git a/src/graph/executor/query/AppendVerticesExecutor.cpp b/src/graph/executor/query/AppendVerticesExecutor.cpp index 94f67efc581..aa6088f1db5 100644 --- a/src/graph/executor/query/AppendVerticesExecutor.cpp +++ b/src/graph/executor/query/AppendVerticesExecutor.cpp @@ -5,7 +5,7 @@ #include "graph/executor/query/AppendVerticesExecutor.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::GetPropResponse; @@ -25,17 +25,17 @@ folly::Future AppendVerticesExecutor::appendVertices() { SCOPED_TIMER(&execTime_); auto *av = asNode(node()); - GraphStorageClient *storageClient = qctx()->getStorageClient(); + StorageClient *storageClient = qctx()->getStorageClient(); DataSet vertices = buildRequestDataSet(av); if (vertices.rows.empty()) { return finish(ResultBuilder().value(Value(DataSet(av->colNames()))).build()); } - GraphStorageClient::CommonRequestParam param(av->space(), - qctx()->rctx()->session()->id(), - qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(av->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); time::Duration getPropsTime; return DCHECK_NOTNULL(storageClient) ->getProps(param, diff --git a/src/graph/executor/query/GetEdgesExecutor.cpp b/src/graph/executor/query/GetEdgesExecutor.cpp index 45830738962..4ee5ddace34 100644 --- a/src/graph/executor/query/GetEdgesExecutor.cpp +++ b/src/graph/executor/query/GetEdgesExecutor.cpp @@ -10,7 +10,7 @@ #include "graph/planner/plan/Query.h" #include "graph/util/SchemaUtil.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::GetPropResponse; @@ -49,7 
+49,7 @@ DataSet GetEdgesExecutor::buildRequestDataSet(const GetEdges *ge) { folly::Future GetEdgesExecutor::getEdges() { SCOPED_TIMER(&execTime_); - GraphStorageClient *client = qctx()->getStorageClient(); + StorageClient *client = qctx()->getStorageClient(); auto *ge = asNode(node()); if (ge->src() == nullptr || ge->type() == nullptr || ge->ranking() == nullptr || ge->dst() == nullptr) { @@ -65,10 +65,10 @@ folly::Future GetEdgesExecutor::getEdges() { } time::Duration getPropsTime; - GraphStorageClient::CommonRequestParam param(ge->space(), - qctx()->rctx()->session()->id(), - qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(ge->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); return DCHECK_NOTNULL(client) ->getProps(param, std::move(edges), diff --git a/src/graph/executor/query/GetNeighborsExecutor.cpp b/src/graph/executor/query/GetNeighborsExecutor.cpp index b8151d0b3e1..c315ce53910 100644 --- a/src/graph/executor/query/GetNeighborsExecutor.cpp +++ b/src/graph/executor/query/GetNeighborsExecutor.cpp @@ -7,14 +7,14 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/datatypes/List.h" #include "common/datatypes/Vertex.h" #include "common/time/ScopedTimer.h" #include "graph/context/QueryContext.h" #include "graph/service/GraphFlags.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::GetNeighborsResponse; @@ -40,12 +40,12 @@ folly::Future GetNeighborsExecutor::execute() { } time::Duration getNbrTime; - GraphStorageClient* storageClient = qctx_->getStorageClient(); + StorageClient* storageClient = qctx_->getStorageClient(); QueryExpressionContext qec(qctx()->ectx()); - GraphStorageClient::CommonRequestParam param(gn_->space(), - qctx()->rctx()->session()->id(), - 
qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(gn_->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); return storageClient ->getNeighbors(param, std::move(reqDs.colNames), diff --git a/src/graph/executor/query/GetVerticesExecutor.cpp b/src/graph/executor/query/GetVerticesExecutor.cpp index ec2d9934c37..f3a7bd2dd59 100644 --- a/src/graph/executor/query/GetVerticesExecutor.cpp +++ b/src/graph/executor/query/GetVerticesExecutor.cpp @@ -9,7 +9,7 @@ #include "graph/context/QueryContext.h" #include "graph/util/SchemaUtil.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::GetPropResponse; @@ -22,7 +22,7 @@ folly::Future GetVerticesExecutor::getVertices() { SCOPED_TIMER(&execTime_); auto *gv = asNode(node()); - GraphStorageClient *storageClient = qctx()->getStorageClient(); + StorageClient *storageClient = qctx()->getStorageClient(); DataSet vertices = buildRequestDataSet(gv); if (vertices.rows.empty()) { @@ -32,10 +32,10 @@ folly::Future GetVerticesExecutor::getVertices() { } time::Duration getPropsTime; - GraphStorageClient::CommonRequestParam param(gv->space(), - qctx()->rctx()->session()->id(), - qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(gv->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); return DCHECK_NOTNULL(storageClient) ->getProps(param, std::move(vertices), diff --git a/src/graph/executor/query/IndexScanExecutor.cpp b/src/graph/executor/query/IndexScanExecutor.cpp index 13f667be63f..3f3b20ae37e 100644 --- a/src/graph/executor/query/IndexScanExecutor.cpp +++ b/src/graph/executor/query/IndexScanExecutor.cpp @@ -11,7 +11,7 @@ #include "graph/planner/plan/PlanNode.h" #include "graph/service/GraphFlags.h" -using 
nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::LookupIndexResp; @@ -21,7 +21,7 @@ namespace graph { folly::Future IndexScanExecutor::execute() { return indexScan(); } folly::Future IndexScanExecutor::indexScan() { - GraphStorageClient *storageClient = qctx_->getStorageClient(); + StorageClient *storageClient = qctx_->getStorageClient(); auto *lookup = asNode(node()); if (lookup->isEmptyResultSet()) { DataSet dataSet({"dummy"}); @@ -35,10 +35,10 @@ folly::Future IndexScanExecutor::indexScan() { return Status::Error("There is no index to use at runtime"); } - GraphStorageClient::CommonRequestParam param(lookup->space(), - qctx()->rctx()->session()->id(), - qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(lookup->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); return storageClient ->lookupIndex(param, ictxs, diff --git a/src/graph/executor/query/IndexScanExecutor.h b/src/graph/executor/query/IndexScanExecutor.h index c9823752e26..4d5e489ec20 100644 --- a/src/graph/executor/query/IndexScanExecutor.h +++ b/src/graph/executor/query/IndexScanExecutor.h @@ -6,7 +6,7 @@ #ifndef GRAPH_EXECUTOR_QUERY_INDEXSCANEXECUTOR_H_ #define GRAPH_EXECUTOR_QUERY_INDEXSCANEXECUTOR_H_ -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "graph/executor/StorageAccessExecutor.h" #include "graph/planner/plan/Query.h" #include "interface/gen-cpp2/storage_types.h" diff --git a/src/graph/executor/query/TraverseExecutor.cpp b/src/graph/executor/query/TraverseExecutor.cpp index d2e23e2d458..ee1e638da8f 100644 --- a/src/graph/executor/query/TraverseExecutor.cpp +++ b/src/graph/executor/query/TraverseExecutor.cpp @@ -7,7 +7,7 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include 
"common/datatypes/List.h" #include "common/datatypes/Vertex.h" #include "common/time/ScopedTimer.h" @@ -15,7 +15,7 @@ #include "graph/service/GraphFlags.h" #include "graph/util/SchemaUtil.h" -using nebula::storage::GraphStorageClient; +using nebula::storage::StorageClient; using nebula::storage::StorageRpcResponse; using nebula::storage::cpp2::GetNeighborsResponse; @@ -85,12 +85,12 @@ folly::Future TraverseExecutor::traverse() { void TraverseExecutor::getNeighbors() { currentStep_++; time::Duration getNbrTime; - GraphStorageClient* storageClient = qctx_->getStorageClient(); + StorageClient* storageClient = qctx_->getStorageClient(); bool finalStep = isFinalStep(); - GraphStorageClient::CommonRequestParam param(traverse_->space(), - qctx()->rctx()->session()->id(), - qctx()->plan()->id(), - qctx()->plan()->isProfileEnabled()); + StorageClient::CommonRequestParam param(traverse_->space(), + qctx()->rctx()->session()->id(), + qctx()->plan()->id(), + qctx()->plan()->isProfileEnabled()); storageClient ->getNeighbors(param, reqDs_.colNames, diff --git a/src/graph/executor/query/TraverseExecutor.h b/src/graph/executor/query/TraverseExecutor.h index 4f2802cb68d..d85d7c44247 100644 --- a/src/graph/executor/query/TraverseExecutor.h +++ b/src/graph/executor/query/TraverseExecutor.h @@ -8,7 +8,7 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/StatusOr.h" #include "common/datatypes/Value.h" #include "common/datatypes/Vertex.h" diff --git a/src/graph/executor/test/CMakeLists.txt b/src/graph/executor/test/CMakeLists.txt index d88f3b299e7..f664ee5f192 100644 --- a/src/graph/executor/test/CMakeLists.txt +++ b/src/graph/executor/test/CMakeLists.txt @@ -8,7 +8,7 @@ SET(EXEC_QUERY_TEST_OBJS $ $ $ - $ + $ $ $ $ diff --git a/src/graph/planner/test/CMakeLists.txt b/src/graph/planner/test/CMakeLists.txt index 355c1cbae60..bb45ebd9201 100644 --- a/src/graph/planner/test/CMakeLists.txt +++ 
b/src/graph/planner/test/CMakeLists.txt @@ -14,7 +14,7 @@ nebula_add_test( $ $ $ - $ + $ $ $ $ diff --git a/src/graph/service/GraphService.cpp b/src/graph/service/GraphService.cpp index 8058688de71..3b58c3be851 100644 --- a/src/graph/service/GraphService.cpp +++ b/src/graph/service/GraphService.cpp @@ -5,7 +5,7 @@ #include "graph/service/GraphService.h" -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/encryption/MD5Utils.h" #include "common/time/Duration.h" diff --git a/src/graph/service/QueryEngine.cpp b/src/graph/service/QueryEngine.cpp index b5d47df8e3a..d3f4f54e609 100644 --- a/src/graph/service/QueryEngine.cpp +++ b/src/graph/service/QueryEngine.cpp @@ -29,7 +29,7 @@ Status QueryEngine::init(std::shared_ptr ioExecutor metaClient_ = metaClient; schemaManager_ = meta::ServerBasedSchemaManager::create(metaClient_); indexManager_ = meta::ServerBasedIndexManager::create(metaClient_); - storage_ = std::make_unique(ioExecutor, metaClient_); + storage_ = std::make_unique(ioExecutor, metaClient_); charsetInfo_ = CharsetInfo::instance(); PlannersRegister::registerPlanners(); diff --git a/src/graph/service/QueryEngine.h b/src/graph/service/QueryEngine.h index a0b777bc35b..b85f5dc0e78 100644 --- a/src/graph/service/QueryEngine.h +++ b/src/graph/service/QueryEngine.h @@ -9,7 +9,7 @@ #include #include "clients/meta/MetaClient.h" -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/charset/Charset.h" #include "common/cpp/helpers.h" #include "common/meta/IndexManager.h" @@ -45,7 +45,7 @@ class QueryEngine final : public cpp::NonCopyable, public cpp::NonMovable { std::unique_ptr schemaManager_; std::unique_ptr indexManager_; - std::unique_ptr storage_; + std::unique_ptr storage_; std::unique_ptr optimizer_; std::unique_ptr memoryMonitorThread_; meta::MetaClient* metaClient_{nullptr}; diff --git a/src/interface/CMakeLists.txt 
b/src/interface/CMakeLists.txt index 21fc56f4336..5dcfefbaabb 100644 --- a/src/interface/CMakeLists.txt +++ b/src/interface/CMakeLists.txt @@ -22,7 +22,7 @@ thrift_generate("raftex" "RaftexService" ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CUR # Target object name : storage_thrift_obj thrift_generate( "storage" - "GraphStorageService;StorageAdminService;GeneralStorageService;InternalStorageService" + "GraphStorageService;StorageAdminService;InternalStorageService" ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} "interface" diff --git a/src/interface/storage.thrift b/src/interface/storage.thrift index 3451fd8b8e2..98c884b5485 100644 --- a/src/interface/storage.thrift +++ b/src/interface/storage.thrift @@ -630,18 +630,37 @@ struct TaskPara { 3: optional list task_specific_paras } -struct AddAdminTaskRequest { - // rebuild index / flush / compact / stats - 1: meta.AdminCmd cmd - 2: i32 job_id - 3: i32 task_id - 4: TaskPara para - 5: optional i32 concurrency +////////////////////////////////////////////////////////// +// +// Requests, responses for the kv interfaces +// +////////////////////////////////////////////////////////// +struct KVGetRequest { + 1: common.GraphSpaceID space_id, + 2: map>( + cpp.template = "std::unordered_map") parts, + // When return_partly is true and some of the keys not found, will return the keys + // which exist + 3: bool return_partly } -struct StopAdminTaskRequest { - 1: i32 job_id - 2: i32 task_id +struct KVGetResponse { + 1: required ResponseCommon result, + 2: map(cpp.template = "std::unordered_map") key_values, +} + +struct KVPutRequest { + 1: common.GraphSpaceID space_id, + // part -> key/value + 2: map>( + cpp.template = "std::unordered_map") parts, +} + +struct KVRemoveRequest { + 1: common.GraphSpaceID space_id, + // part -> key + 2: map>( + cpp.template = "std::unordered_map") parts, } service GraphStorageService { @@ -672,6 +691,10 @@ service GraphStorageService { UpdateResponse chainUpdateEdge(1: UpdateEdgeRequest req); 
ExecResponse chainAddEdges(1: AddEdgesRequest req); + + KVGetResponse get(1: KVGetRequest req); + ExecResponse put(1: KVPutRequest req); + ExecResponse remove(1: KVRemoveRequest req); } @@ -790,6 +813,20 @@ struct ListClusterInfoResp { struct ListClusterInfoReq { } +struct AddAdminTaskRequest { + // rebuild index / flush / compact / statis + 1: meta.AdminCmd cmd + 2: i32 job_id + 3: i32 task_id + 4: TaskPara para + 5: optional i32 concurrency +} + +struct StopAdminTaskRequest { + 1: i32 job_id + 2: i32 task_id +} + service StorageAdminService { // Interfaces for admin operations AdminExecResp transLeader(1: TransLeaderReq req); @@ -820,50 +857,6 @@ service StorageAdminService { } -////////////////////////////////////////////////////////// -// -// Requests, responses for the GeneralStorageService -// -////////////////////////////////////////////////////////// -struct KVGetRequest { - 1: common.GraphSpaceID space_id, - 2: map>( - cpp.template = "std::unordered_map") parts, - // When return_partly is true and some of the keys not found, will return the keys - // which exist - 3: bool return_partly -} - - -struct KVGetResponse { - 1: required ResponseCommon result, - 2: map(cpp.template = "std::unordered_map") key_values, -} - - -struct KVPutRequest { - 1: common.GraphSpaceID space_id, - // part -> key/value - 2: map>( - cpp.template = "std::unordered_map") parts, -} - - -struct KVRemoveRequest { - 1: common.GraphSpaceID space_id, - // part -> key - 2: map>( - cpp.template = "std::unordered_map") parts, -} - - -service GeneralStorageService { - // Interfaces for key-value storage - KVGetResponse get(1: KVGetRequest req); - ExecResponse put(1: KVPutRequest req); - ExecResponse remove(1: KVRemoveRequest req); -} - ////////////////////////////////////////////////////////// // // Requests, responses for the InternalStorageService diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index a02e1bd3073..7faf784bc26 100644 --- a/src/meta/CMakeLists.txt +++ 
b/src/meta/CMakeLists.txt @@ -110,11 +110,10 @@ set(meta_test_deps $ $ $ - $ $ $ $ - $ + $ $ $ $ diff --git a/src/mock/MockCluster.cpp b/src/mock/MockCluster.cpp index 3bc79378900..ad4e91b44f8 100644 --- a/src/mock/MockCluster.cpp +++ b/src/mock/MockCluster.cpp @@ -12,7 +12,6 @@ #include "mock/AdHocSchemaManager.h" #include "mock/MockData.h" #include "storage/CompactionFilter.h" -#include "storage/GeneralStorageServiceHandler.h" #include "storage/GraphStorageServiceHandler.h" #include "storage/StorageAdminServiceHandler.h" #include "storage/transaction/TransactionManager.h" @@ -217,7 +216,6 @@ void MockCluster::initStorageKV(const char* dataPath, void MockCluster::startStorage(HostAddr addr, const std::string& rootPath, - bool isGeneralService, SchemaVer schemaVerCount) { initStorageKV(rootPath.c_str(), addr, schemaVerCount); @@ -227,17 +225,10 @@ void MockCluster::startStorage(HostAddr addr, storageAdminServer_->start("admin-storage", addr.port - 1, adminHandler); LOG(INFO) << "The admin storage daemon started on port " << storageAdminServer_->port_; - if (!isGeneralService) { - graphStorageServer_ = std::make_unique(); - auto graphHandler = std::make_shared(env); - graphStorageServer_->start("graph-storage", addr.port, graphHandler); - LOG(INFO) << "The graph storage daemon started on port " << graphStorageServer_->port_; - } else { - generalStorageServer_ = std::make_unique(); - auto generalHandler = std::make_shared(env); - generalStorageServer_->start("general-storage", addr.port, generalHandler); - LOG(INFO) << "The general storage daemon started on port " << generalStorageServer_->port_; - } + graphStorageServer_ = std::make_unique(); + auto graphHandler = std::make_shared(env); + graphStorageServer_->start("graph-storage", addr.port, graphHandler); + LOG(INFO) << "The graph storage daemon started on port " << graphStorageServer_->port_; } std::unique_ptr MockCluster::memSchemaMan(SchemaVer schemaVerCount, @@ -293,17 +284,11 @@ meta::MetaClient* 
MockCluster::initMetaClient(meta::MetaClientOptions options) { return metaClient_.get(); } -storage::GraphStorageClient* MockCluster::initGraphStorageClient() { +storage::StorageClient* MockCluster::initGraphStorageClient() { auto threadPool = std::make_shared(1); - storageClient_ = std::make_unique(threadPool, metaClient_.get()); + storageClient_ = std::make_unique(threadPool, metaClient_.get()); return storageClient_.get(); } -storage::GeneralStorageClient* MockCluster::initGeneralStorageClient() { - auto threadPool = std::make_shared(1); - generalClient_ = std::make_unique(threadPool, metaClient_.get()); - return generalClient_.get(); -} - } // namespace mock } // namespace nebula diff --git a/src/mock/MockCluster.h b/src/mock/MockCluster.h index f7ed05a4ba2..8548a0e51f4 100644 --- a/src/mock/MockCluster.h +++ b/src/mock/MockCluster.h @@ -12,8 +12,7 @@ #include #include -#include "clients/storage/GeneralStorageClient.h" -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/base/ObjectPool.h" #include "kvstore/KVStore.h" @@ -36,7 +35,6 @@ class MockCluster { stop(); storageAdminServer_.reset(); graphStorageServer_.reset(); - generalStorageServer_.reset(); } void startAll(); @@ -45,7 +43,6 @@ class MockCluster { void startStorage(HostAddr addr, const std::string& rootPath, - bool isGeneralService = false, SchemaVer schemaVerCount = 1); /** @@ -58,9 +55,7 @@ class MockCluster { * Init a storage client connect to graphStorageServer * The meta server, and meta client must started first * */ - storage::GraphStorageClient* initGraphStorageClient(); - - storage::GeneralStorageClient* initGeneralStorageClient(); + storage::StorageClient* initGraphStorageClient(); std::unique_ptr memSchemaMan(SchemaVer schemaVerCount = 1, GraphSpaceID spaceId = 1, @@ -118,13 +113,11 @@ class MockCluster { public: std::unique_ptr metaServer_{nullptr}; std::unique_ptr metaClient_{nullptr}; - 
std::unique_ptr storageClient_{nullptr}; - std::unique_ptr generalClient_{nullptr}; + std::unique_ptr storageClient_{nullptr}; std::unique_ptr metaKV_{nullptr}; std::unique_ptr storageAdminServer_{nullptr}; std::unique_ptr graphStorageServer_{nullptr}; - std::unique_ptr generalStorageServer_{nullptr}; std::unique_ptr storageKV_{nullptr}; std::unique_ptr storageEnv_{nullptr}; diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index c182f8b4f99..4f263476edc 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -49,11 +49,6 @@ nebula_add_library( exec/IndexScanNode.cpp exec/IndexSelectionNode.cpp exec/IndexVertexScanNode.cpp -) - -nebula_add_library( - general_storage_service_handler OBJECT - GeneralStorageServiceHandler.cpp kv/PutProcessor.cpp kv/GetProcessor.cpp kv/RemoveProcessor.cpp diff --git a/src/storage/CompactionFilter.h b/src/storage/CompactionFilter.h index 6f95e3675fd..1c17b1d3ff4 100644 --- a/src/storage/CompactionFilter.h +++ b/src/storage/CompactionFilter.h @@ -15,8 +15,6 @@ #include "kvstore/CompactionFilter.h" #include "storage/CommonUtils.h" -DEFINE_bool(storage_kv_mode, false, "True for kv mode"); - namespace nebula { namespace storage { @@ -32,11 +30,6 @@ class StorageCompactionFilter final : public kvstore::KVFilter { bool filter(GraphSpaceID spaceId, const folly::StringPiece& key, const folly::StringPiece& val) const override { - if (FLAGS_storage_kv_mode) { - // in kv mode, we don't delete any data - return false; - } - if (NebulaKeyUtils::isTag(vIdLen_, key)) { return !vertexValid(spaceId, key, val); } else if (NebulaKeyUtils::isEdge(vIdLen_, key)) { diff --git a/src/storage/GeneralStorageServiceHandler.cpp b/src/storage/GeneralStorageServiceHandler.cpp deleted file mode 100644 index 308bbd97fd2..00000000000 --- a/src/storage/GeneralStorageServiceHandler.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#include "storage/GeneralStorageServiceHandler.h" - -#include "storage/kv/GetProcessor.h" -#include "storage/kv/PutProcessor.h" -#include "storage/kv/RemoveProcessor.h" - -#define RETURN_FUTURE(processor) \ - auto f = processor->getFuture(); \ - processor->process(req); \ - return f; - -namespace nebula { -namespace storage { - -GeneralStorageServiceHandler::GeneralStorageServiceHandler(StorageEnv* env) : env_(env) { - kPutCounters.init("put"); - kGetCounters.init("get"); - kRemoveCounters.init("remove"); -} - -folly::Future GeneralStorageServiceHandler::future_put( - const cpp2::KVPutRequest& req) { - auto* processor = PutProcessor::instance(env_); - RETURN_FUTURE(processor); -} - -folly::Future GeneralStorageServiceHandler::future_get( - const cpp2::KVGetRequest& req) { - auto* processor = GetProcessor::instance(env_); - RETURN_FUTURE(processor); -} - -folly::Future GeneralStorageServiceHandler::future_remove( - const cpp2::KVRemoveRequest& req) { - auto* processor = RemoveProcessor::instance(env_); - RETURN_FUTURE(processor); -} - -} // namespace storage -} // namespace nebula diff --git a/src/storage/GeneralStorageServiceHandler.h b/src/storage/GeneralStorageServiceHandler.h deleted file mode 100644 index de1e4ed89ec..00000000000 --- a/src/storage/GeneralStorageServiceHandler.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2020 vesoft inc. All rights reserved. - * - * This source code is licensed under Apache 2.0 License. 
- */ - -#ifndef STORAGE_GENERALSTORAGESERVICEHANDLER_H_ -#define STORAGE_GENERALSTORAGESERVICEHANDLER_H_ - -#include "common/base/Base.h" -#include "interface/gen-cpp2/GeneralStorageService.h" - -namespace nebula { -namespace storage { - -class StorageEnv; - -class GeneralStorageServiceHandler final : public cpp2::GeneralStorageServiceSvIf { - public: - explicit GeneralStorageServiceHandler(StorageEnv* env); - - folly::Future future_put(const cpp2::KVPutRequest& req) override; - - folly::Future future_get(const cpp2::KVGetRequest& req) override; - - folly::Future future_remove(const cpp2::KVRemoveRequest& req) override; - - private: - StorageEnv* env_{nullptr}; -}; - -} // namespace storage -} // namespace nebula -#endif // STORAGE_GENERALSTORAGESERVICEHANDLER_H_ diff --git a/src/storage/GraphStorageServiceHandler.cpp b/src/storage/GraphStorageServiceHandler.cpp index 2c99894097f..23f9abe18ec 100644 --- a/src/storage/GraphStorageServiceHandler.cpp +++ b/src/storage/GraphStorageServiceHandler.cpp @@ -6,6 +6,9 @@ #include "storage/GraphStorageServiceHandler.h" #include "storage/index/LookupProcessor.h" +#include "storage/kv/GetProcessor.h" +#include "storage/kv/PutProcessor.h" +#include "storage/kv/RemoveProcessor.h" #include "storage/mutate/AddEdgesProcessor.h" #include "storage/mutate/AddVerticesProcessor.h" #include "storage/mutate/DeleteEdgesProcessor.h" @@ -57,6 +60,9 @@ GraphStorageServiceHandler::GraphStorageServiceHandler(StorageEnv* env) : env_(e kLookupCounters.init("lookup"); kScanVertexCounters.init("scan_vertex"); kScanEdgeCounters.init("scan_edge"); + kPutCounters.init("kv_put"); + kGetCounters.init("kv_get"); + kRemoveCounters.init("kv_remove"); } // Vertice section @@ -154,5 +160,23 @@ folly::Future GraphStorageServiceHandler::future_chainAddEdg RETURN_FUTURE(processor); } +folly::Future GraphStorageServiceHandler::future_put( + const cpp2::KVPutRequest& req) { + auto* processor = PutProcessor::instance(env_); + RETURN_FUTURE(processor); +} + 
+folly::Future GraphStorageServiceHandler::future_get( + const cpp2::KVGetRequest& req) { + auto* processor = GetProcessor::instance(env_); + RETURN_FUTURE(processor); +} + +folly::Future GraphStorageServiceHandler::future_remove( + const cpp2::KVRemoveRequest& req) { + auto* processor = RemoveProcessor::instance(env_); + RETURN_FUTURE(processor); +} + } // namespace storage } // namespace nebula diff --git a/src/storage/GraphStorageServiceHandler.h b/src/storage/GraphStorageServiceHandler.h index 905c3fd67a7..a28b1509cb1 100644 --- a/src/storage/GraphStorageServiceHandler.h +++ b/src/storage/GraphStorageServiceHandler.h @@ -62,6 +62,12 @@ class GraphStorageServiceHandler final : public cpp2::GraphStorageServiceSvIf { folly::Future future_getUUID(const cpp2::GetUUIDReq& req) override; + folly::Future future_put(const cpp2::KVPutRequest& req) override; + + folly::Future future_get(const cpp2::KVGetRequest& req) override; + + folly::Future future_remove(const cpp2::KVRemoveRequest& req) override; + private: StorageEnv* env_{nullptr}; std::shared_ptr readerPool_; diff --git a/src/storage/StorageServer.cpp b/src/storage/StorageServer.cpp index bdc332e2ab7..44e864928b3 100644 --- a/src/storage/StorageServer.cpp +++ b/src/storage/StorageServer.cpp @@ -38,6 +38,7 @@ DEFINE_int32(num_io_threads, 16, "Number of IO threads"); DEFINE_int32(num_worker_threads, 32, "Number of workers"); DEFINE_int32(storage_http_thread_num, 3, "Number of storage daemon's http thread"); DEFINE_bool(local_config, false, "meta client will not retrieve latest configuration from meta"); +DEFINE_bool(storage_kv_mode, false, "True for kv mode"); namespace nebula { namespace storage { @@ -62,8 +63,10 @@ std::unique_ptr StorageServer::getStoreInstance() { options.listenerPath_ = listenerPath_; options.partMan_ = std::make_unique(localHost_, metaClient_.get()); - options.cffBuilder_ = - std::make_unique(schemaMan_.get(), indexMan_.get()); + if (!FLAGS_storage_kv_mode) { + options.cffBuilder_ = + 
std::make_unique(schemaMan_.get(), indexMan_.get()); + } options.schemaMan_ = schemaMan_.get(); if (FLAGS_store_type == "nebula") { auto nbStore = std::make_unique( diff --git a/src/storage/test/CMakeLists.txt b/src/storage/test/CMakeLists.txt index ceb3809be17..44f27faa41f 100644 --- a/src/storage/test/CMakeLists.txt +++ b/src/storage/test/CMakeLists.txt @@ -7,9 +7,8 @@ set(storage_test_deps $ $ $ - $ $ - $ + $ $ $ $ @@ -583,7 +582,6 @@ nebula_add_test( SOURCES KVClientTest.cpp OBJECTS - $ ${storage_test_deps} LIBRARIES ${ROCKSDB_LIBRARIES} diff --git a/src/storage/test/KVClientTest.cpp b/src/storage/test/KVClientTest.cpp index 7249997686f..06122bf9ca1 100644 --- a/src/storage/test/KVClientTest.cpp +++ b/src/storage/test/KVClientTest.cpp @@ -5,7 +5,7 @@ #include -#include "clients/storage/GeneralStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/datatypes/KeyValue.h" #include "common/fs/TempDir.h" @@ -44,9 +44,9 @@ TEST(KVClientTest, SimpleTest) { options.localHost_ = storageAddr; options.role_ = meta::cpp2::HostRole::STORAGE; cluster.initMetaClient(options); - cluster.startStorage(storageAddr, storagePath.path(), true); + cluster.startStorage(storageAddr, storagePath.path()); - auto client = cluster.initGeneralStorageClient(); + auto client = cluster.initGraphStorageClient(); // kv interface test { std::vector pairs; diff --git a/src/storage/test/KVTest.cpp b/src/storage/test/KVTest.cpp index d7520bbc903..725ec1d8803 100644 --- a/src/storage/test/KVTest.cpp +++ b/src/storage/test/KVTest.cpp @@ -5,7 +5,7 @@ #include -#include "clients/storage/GeneralStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/fs/TempDir.h" #include "common/network/NetworkUtils.h" diff --git a/src/storage/test/StorageClientTest.cpp b/src/storage/test/StorageClientTest.cpp index 10f658f2a11..6b7dd2ec136 100644 --- a/src/storage/test/StorageClientTest.cpp +++ 
b/src/storage/test/StorageClientTest.cpp @@ -5,7 +5,7 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "codec/RowReader.h" #include "common/base/Base.h" #include "common/fs/TempDir.h" diff --git a/src/storage/test/StorageLookupBenchmark.cpp b/src/storage/test/StorageLookupBenchmark.cpp index d421b3910cc..95d229a530c 100644 --- a/src/storage/test/StorageLookupBenchmark.cpp +++ b/src/storage/test/StorageLookupBenchmark.cpp @@ -5,7 +5,7 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "codec/RowWriter.h" #include "common/base/Base.h" #include "common/fs/FileUtils.h" diff --git a/src/storage/test/TossTestExecutor.h b/src/storage/test/TossTestExecutor.h index 85475f7f260..99e81c550a5 100644 --- a/src/storage/test/TossTestExecutor.h +++ b/src/storage/test/TossTestExecutor.h @@ -10,7 +10,7 @@ namespace nebula { namespace storage { -using StorageClient = storage::GraphStorageClient; +using StorageClient = storage::StorageClient; template class StorageResponseReader { diff --git a/src/storage/test/TossTestUtils.h b/src/storage/test/TossTestUtils.h index f7000c47ecc..006cd46753f 100644 --- a/src/storage/test/TossTestUtils.h +++ b/src/storage/test/TossTestUtils.h @@ -14,8 +14,8 @@ #include #include -#include "clients/storage/GraphStorageClient.h" #include "clients/storage/InternalStorageClient.h" +#include "clients/storage/StorageClient.h" #include "codec/RowWriterV2.h" #include "common/base/Base.h" #include "common/expression/ConstantExpression.h" diff --git a/src/tools/CMakeLists.txt b/src/tools/CMakeLists.txt index 23fb03897d2..9c0e31c33f0 100644 --- a/src/tools/CMakeLists.txt +++ b/src/tools/CMakeLists.txt @@ -1,5 +1,5 @@ nebula_add_subdirectory(storage-perf) -#nebula_add_subdirectory(simple-kv-verify) +nebula_add_subdirectory(simple-kv-verify) nebula_add_subdirectory(meta-dump) nebula_add_subdirectory(db-dump) 
nebula_add_subdirectory(db-upgrade) diff --git a/src/tools/simple-kv-verify/CMakeLists.txt b/src/tools/simple-kv-verify/CMakeLists.txt index cf65e874429..2aff91b1a5b 100644 --- a/src/tools/simple-kv-verify/CMakeLists.txt +++ b/src/tools/simple-kv-verify/CMakeLists.txt @@ -10,6 +10,7 @@ nebula_add_executable( $ $ $ + $ $ $ $ @@ -20,11 +21,12 @@ nebula_add_executable( $ $ $ + $ + $ $ $ $ $ - $ $ $ $ @@ -40,25 +42,20 @@ nebula_add_executable( $ $ $ - $ $ $ $ $ + $ + $ + $ $ + $ $ LIBRARIES ${ROCKSDB_LIBRARIES} ${THRIFT_LIBRARIES} + ${PROXYGEN_LIBRARIES} wangle gtest ) - -#install( -# TARGETS -# simple_kv_verify -# DESTINATION -# bin -# COMPONENT -# tool -#) diff --git a/src/tools/simple-kv-verify/SimpleKVVerifyTool.cpp b/src/tools/simple-kv-verify/SimpleKVVerifyTool.cpp index 8590bf29be6..35179b17c72 100644 --- a/src/tools/simple-kv-verify/SimpleKVVerifyTool.cpp +++ b/src/tools/simple-kv-verify/SimpleKVVerifyTool.cpp @@ -4,9 +4,10 @@ */ #include +#include #include "clients/meta/MetaClient.h" -#include "clients/storage/GeneralStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/datatypes/KeyValue.h" #include "common/meta/SchemaManager.h" @@ -52,8 +53,7 @@ class SimpleKVVerifyTool { spaceId_ = spaceResult.value(); LOG(INFO) << "Space ID: " << spaceId_; - generalStorageClient_ = - std::make_unique(ioExecutor, metaClient_.get()); + storageClient_ = std::make_unique(ioExecutor, metaClient_.get()); return EXIT_SUCCESS; } @@ -66,7 +66,7 @@ class SimpleKVVerifyTool { keyValues.emplace_back(std::make_pair(key, value)); } - auto future = generalStorageClient_->put(spaceId_, std::move(keyValues)); + auto future = storageClient_->put(spaceId_, std::move(keyValues)); auto resp = std::move(future).get(); if (!resp.succeeded()) { LOG(ERROR) << "Put Failed"; @@ -89,7 +89,7 @@ class SimpleKVVerifyTool { keys.emplace_back(pair.first); } - auto future = generalStorageClient_->get(spaceId_, std::move(keys)); + auto future = 
storageClient_->get(spaceId_, std::move(keys)); auto resp = std::move(future).get(); if (!resp.succeeded()) { LOG(ERROR) << "Get Failed"; @@ -108,8 +108,9 @@ class SimpleKVVerifyTool { auto key = pair.first; bool found = false; for (const auto& result : resp.responses()) { - auto iter = result.key_values.find(key); - if (iter != result.key_values.end()) { + auto kvs = result.get_key_values(); + auto iter = kvs.find(key); + if (iter != kvs.end()) { if (iter->second != pairs[key]) { LOG(ERROR) << "Check Fail: key = " << key << ", values: " << iter->second << " != " << pairs[key]; @@ -128,7 +129,7 @@ class SimpleKVVerifyTool { } private: - std::unique_ptr generalStorageClient_; + std::unique_ptr storageClient_; std::unique_ptr metaClient_; nebula::GraphSpaceID spaceId_; }; diff --git a/src/tools/storage-perf/CMakeLists.txt b/src/tools/storage-perf/CMakeLists.txt index deb1bd67277..ff7ebc52b71 100644 --- a/src/tools/storage-perf/CMakeLists.txt +++ b/src/tools/storage-perf/CMakeLists.txt @@ -16,7 +16,7 @@ set(perf_test_deps $ $ $ - $ + $ $ $ $ diff --git a/src/tools/storage-perf/StorageIntegrityTool.cpp b/src/tools/storage-perf/StorageIntegrityTool.cpp index e67c96579cc..2e661e462a3 100644 --- a/src/tools/storage-perf/StorageIntegrityTool.cpp +++ b/src/tools/storage-perf/StorageIntegrityTool.cpp @@ -5,7 +5,7 @@ #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "codec/RowReader.h" #include "common/base/Base.h" #include "common/time/Duration.h" @@ -13,49 +13,33 @@ DEFINE_string(meta_server_addrs, "", "meta server address"); DEFINE_int32(io_threads, 10, "client io threads"); -DEFINE_int32(partition_num, 1024, "partition for space"); DEFINE_string(space_name, "test_space", "the space name"); -DEFINE_string(tag_name, "test_tag", "the tag name"); -DEFINE_string(prop_name, "test_prop", "the property name"); -DEFINE_string(first_vertex_id, "1", "The smallest vertex id"); -DEFINE_uint64(width, 100, "width of matrix"); 
-DEFINE_uint64(height, 1000, "height of matrix"); - -DECLARE_int32(heartbeat_interval_secs); +DEFINE_string(first_key, "1", "the smallest key"); +DEFINE_uint32(width, 100, "width of matrix"); +DEFINE_uint32(height, 1000, "height of matrix"); namespace nebula { namespace storage { /** - * We generate a big circle of data, all node is the vertex, and the vertex have - * only one property of the next vertex, so we can validate them by traversing. + * We generate a big circle of data, all node are key/values, the value is the next node's key + * , so we can validate them by traversing. * - * There are some gflags we need to pay attention: - * 1. The space's replica must be 1, because we don't have retry in - * StorageClient, we will update it after we support preheat. The tag must have - * only one int property, which is prop_name. - * 2. If the space and tag doesn't exists, it will try to create one, maybe you - * need to set heartbeat_interval_secs to make sure the storage service has load - * meta. - * 3. The width and height is the size of the big linked list, you can refer to - * the graph below. As expected, we can traverse the big linked list after width - * * height steps starting from any node in the list. + * The width and height is the size of the big linked list, you can refer to the graph below. As + * expected, we can traverse the big linked list after width * height steps starting from any node + * in the list. 
*/ class IntegrityTest { public: - IntegrityTest() - : propName_(FLAGS_prop_name), - width_{FLAGS_width}, - height_{FLAGS_height}, - firstVertexId_{FLAGS_first_vertex_id} {} + IntegrityTest() : width_{FLAGS_width}, height_{FLAGS_height}, firstKey_{FLAGS_first_key} {} int run() { if (!init()) { return EXIT_FAILURE; } prepareData(); - if (!validate(firstVertexId_, width_ * height_)) { + if (!validate(firstKey_, width_ * height_)) { LOG(INFO) << "Integrity test failed"; return EXIT_FAILURE; } @@ -65,7 +49,12 @@ class IntegrityTest { private: bool init() { - FLAGS_heartbeat_interval_secs = 10; + if (static_cast(width_) * static_cast(height_) > + std::numeric_limits::max()) { + LOG(ERROR) << "Width * Height is out of range"; + return false; + } + auto metaAddrsRet = nebula::network::NetworkUtils::toHosts(FLAGS_meta_server_addrs); if (!metaAddrsRet.ok() || metaAddrsRet.value().empty()) { LOG(ERROR) << "Can't get metaServer address, status: " << metaAddrsRet.status() @@ -84,41 +73,13 @@ class IntegrityTest { auto spaceResult = mClient_->getSpaceIdByNameFromCache(FLAGS_space_name); if (!spaceResult.ok()) { - LOG(ERROR) << "Get spaceId failed, try to create one"; - meta::cpp2::SpaceDesc spaceDesc; - spaceDesc.set_space_name(FLAGS_space_name); - spaceDesc.set_partition_num(FLAGS_partition_num); - spaceDesc.set_replica_factor(1); - auto ret = mClient_->createSpace(spaceDesc).get(); - if (!ret.ok()) { - LOG(ERROR) << "Create space failed: " << ret.status(); - return false; - } - spaceId_ = ret.value(); + LOG(ERROR) << "Get spaceId failed"; + return false; } else { spaceId_ = spaceResult.value(); } - auto tagResult = mClient_->getTagIDByNameFromCache(spaceId_, FLAGS_tag_name); - if (!tagResult.ok()) { - sleep(FLAGS_heartbeat_interval_secs + 1); - LOG(ERROR) << "Get tagId failed, try to create one: " << tagResult.status(); - nebula::meta::cpp2::Schema schema; - nebula::meta::cpp2::ColumnDef column; - column.name = FLAGS_prop_name; - 
column.type.set_type(nebula::cpp2::PropertyType::INT64); - (*schema.columns_ref()).emplace_back(std::move(column)); - auto ret = mClient_->createTagSchema(spaceId_, FLAGS_tag_name, schema).get(); - if (!ret.ok()) { - LOG(ERROR) << "Create tag failed: " << ret.status(); - return false; - } - tagId_ = ret.value(); - } else { - tagId_ = tagResult.value(); - } - - client_ = std::make_unique(threadPool_, mClient_.get()); + client_ = std::make_unique(threadPool_, mClient_.get()); return true; } @@ -145,32 +106,31 @@ class IntegrityTest { * |___________________________| */ void prepareData() { - std::vector first; - std::vector prev; - std::vector cur; + std::vector first; + std::vector prev; + std::vector cur; - LOG(INFO) << "Start insert vertex"; + LOG(INFO) << "Start insert kvs"; for (size_t i = 0; i < width_; i++) { - prev.emplace_back(std::to_string(std::atol(firstVertexId_.c_str()) + i)); + prev.emplace_back(std::to_string(std::atoi(firstKey_.c_str()) + i)); } // leave alone the first line, generate other lines for (size_t i = 1; i < height_; i++) { - addVertex(prev, cur, std::to_string(std::atol(firstVertexId_.c_str() + i * width_))); + insertRow(prev, cur, std::to_string(std::atoi(firstKey_.c_str()) + i * width_)); prev = std::move(cur); } // shift the last line std::rotate(prev.begin(), prev.end() - 1, prev.end()); // generate first line, each node in first line will points to a node in // rotated last line, which will make the matrix a big linked list - addVertex(prev, first, firstVertexId_); + insertRow(prev, first, firstKey_); LOG(INFO) << "Prepare data ok"; } - void addVertex(std::vector& prev, std::vector& cur, VertexID startId) { - std::unordered_map> propNames; - propNames[tagId_].emplace_back(propName_); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - auto future = client_->addVertices(param, genVertices(prev, cur, startId), propNames, true); + void insertRow(const std::vector& prev, + std::vector& cur, + const std::string& startId) { + 
auto future = client_->put(spaceId_, genKeyValue(prev, cur, startId)); auto resp = std::move(future).get(); if (!resp.succeeded()) { for (auto& err : resp.failedParts()) { @@ -180,82 +140,45 @@ class IntegrityTest { } } - std::vector genVertices(std::vector& prev, - std::vector& cur, - VertexID startId) { - // We insert add vertices of a row once a time - std::vector newVertices; + std::vector genKeyValue(const std::vector& prev, + std::vector& cur, + const std::string& startId) { + // We insert key-values of a row once a time + std::vector kvs; for (size_t i = 0; i < width_; i++) { - VertexID vId; - vId = std::to_string(std::atol(startId.c_str()) + i); - cur.emplace_back(vId); - - storage::cpp2::NewVertex v; - v.set_id(vId); - std::vector tags; + auto key = std::to_string(std::atoi(startId.c_str()) + i); + cur.emplace_back(key); + kvs.emplace_back(std::make_pair(cur[i], prev[i])); - storage::cpp2::NewTag tag; - tag.set_tag_id(tagId_); - - std::vector props; - Value val(prev[i]); - props.emplace_back(val); - tag.set_props(props); - tags.emplace_back(std::move(tag)); - - v.set_tags(std::move(tags)); - newVertices.emplace_back(std::move(v)); VLOG(2) << "Build " << cur[i] << " -> " << prev[i]; - PLOG_EVERY_N(INFO, 10000) << "We have inserted " - << std::atol(vId.c_str()) - std::atol(firstVertexId_.c_str()) - - width_ - << " vertices so far, total: " << width_ * height_; + LOG_EVERY_N(INFO, 10000) << "We have inserted " + << std::atoi(key.c_str()) - std::atoi(firstKey_.c_str()) - width_ + << " key-value so far, total: " << width_ * height_; } - return newVertices; + return kvs; } - bool validate(VertexID startId, int64_t queryTimes) { + bool validate(const std::string& startId, int64_t queryTimes) { int64_t count = 0; - VertexID nextId = startId; + std::string nextId = startId; while (count < queryTimes) { - PLOG_EVERY_N(INFO, 1000) << "We have gone " << count << " steps so far"; - // TODO support getProps - std::vector props; - cpp2::VertexProp tagProp; - 
tagProp.set_tag(tagId_); - (*tagProp.props_ref()).emplace_back(propName_); - DataSet dataset({kVid}); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - auto future = client_->getProps(param, dataset, &props, nullptr, nullptr); + LOG_EVERY_N(INFO, 1000) << "We have gone " << count << " steps so far"; + auto future = client_->get(spaceId_, {nextId}); auto resp = std::move(future).get(); if (!resp.succeeded()) { - LOG(ERROR) << "Failed to fetch props of vertex " << nextId; + LOG(ERROR) << "Failed to get value of " << nextId; return false; } -// TODO -#if 0 - auto& results = resp.responses(); - // get tag schema - auto* vschema = results[0].get_vertex_schema(); - DCHECK(vschema != nullptr); - auto tagIter = vschema->find(tagId_); - DCHECK(tagIter != vschema->end()); - auto tagProvider = std::make_shared(tagIter->second); - for (auto& vdata : results[0].vertices) { - auto iter = std::find_if(vdata.tag_data.begin(), vdata.tag_data.end(), - [this] (const auto& tagData) { - return tagData.tag_id == tagId_; - }); - if (iter == vdata.tag_data.end()) { - return false; - } - auto tagReader = RowReaderWrapper::getRowReader(iter->data, tagProvider); - auto ret = RowReader::getPropByName(tagReader.get(), propName_); - CHECK(ok(ret)); - nextId = boost::get(value(ret)); - } -#endif + const auto& results = resp.responses(); + DCHECK_EQ(results.size(), 1UL); + auto kvs = results[0].get_key_values(); + auto iter = kvs.find(nextId); + if (iter == kvs.end()) { + LOG(ERROR) << "Value of " << nextId << " not found"; + return false; + } + nextId = iter->second; count++; } // after go to next node for width * height times, it should go back to @@ -267,15 +190,13 @@ class IntegrityTest { } private: - std::unique_ptr client_; + std::unique_ptr client_; std::unique_ptr mClient_; std::shared_ptr threadPool_; GraphSpaceID spaceId_; - TagID tagId_; - std::string propName_; size_t width_; size_t height_; - VertexID firstVertexId_; + std::string firstKey_; }; } // namespace storage 
diff --git a/src/tools/storage-perf/StoragePerfTool.cpp b/src/tools/storage-perf/StoragePerfTool.cpp index b7336097245..dbb2c9ce7ef 100644 --- a/src/tools/storage-perf/StoragePerfTool.cpp +++ b/src/tools/storage-perf/StoragePerfTool.cpp @@ -8,7 +8,7 @@ #include #include -#include "clients/storage/GraphStorageClient.h" +#include "clients/storage/StorageClient.h" #include "common/base/Base.h" #include "common/thread/GenericWorker.h" #include "common/time/Duration.h" @@ -130,7 +130,7 @@ class Perf { edgeProps_.emplace_back(edgeSchema->getFieldName(i)); } - graphStorageClient_ = std::make_unique(threadPool_, mClient_.get()); + storageClient_ = std::make_unique(threadPool_, mClient_.get()); time::Duration duration; std::vector threads; @@ -297,8 +297,8 @@ class Perf { for (auto i = 0; i < tokens; i++) { auto start = time::WallClock::fastNowInMicroSec(); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0, false); - graphStorageClient_ + StorageClient::CommonRequestParam param(spaceId_, 0, 0, false); + storageClient_ ->getNeighbors(param, colNames, vertices, @@ -332,8 +332,8 @@ class Perf { auto tokens = tokenBucket_.consumeOrDrain(FLAGS_concurrency, FLAGS_qps, FLAGS_concurrency); for (auto i = 0; i < tokens; i++) { auto start = time::WallClock::fastNowInMicroSec(); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - graphStorageClient_->addVertices(param, genVertices(), tagProps_, true) + StorageClient::CommonRequestParam param(spaceId_, 0, 0); + storageClient_->addVertices(param, genVertices(), tagProps_, true) .via(evb) .thenValue([this, start](auto&& resps) { if (!resps.succeeded()) { @@ -361,8 +361,8 @@ class Perf { auto tokens = tokenBucket_.consumeOrDrain(FLAGS_concurrency, FLAGS_qps, FLAGS_concurrency); for (auto i = 0; i < tokens; i++) { auto start = time::WallClock::fastNowInMicroSec(); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - graphStorageClient_->addEdges(param, genEdges(), edgeProps_, true) + 
StorageClient::CommonRequestParam param(spaceId_, 0, 0); + storageClient_->addEdges(param, genEdges(), edgeProps_, true) .via(evb) .thenValue([this, start](auto&& resps) { if (!resps.succeeded()) { @@ -394,8 +394,8 @@ class Perf { input.emplace_back(std::move(row)); auto vProps = vertexProps(); auto start = time::WallClock::fastNowInMicroSec(); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - graphStorageClient_->getProps(param, std::move(input), &vProps, nullptr, nullptr) + StorageClient::CommonRequestParam param(spaceId_, 0, 0); + storageClient_->getProps(param, std::move(input), &vProps, nullptr, nullptr) .via(evb) .thenValue([this, start](auto&& resps) { if (!resps.succeeded()) { @@ -419,8 +419,8 @@ class Perf { input.emplace_back(std::move(row)); auto eProps = edgeProps(); auto start = time::WallClock::fastNowInMicroSec(); - GraphStorageClient::CommonRequestParam param(spaceId_, 0, 0); - graphStorageClient_->getProps(param, std::move(input), nullptr, &eProps, nullptr) + StorageClient::CommonRequestParam param(spaceId_, 0, 0); + storageClient_->getProps(param, std::move(input), nullptr, &eProps, nullptr) .via(evb) .thenValue([this, start](auto&& resps) { if (!resps.succeeded()) { @@ -438,7 +438,7 @@ class Perf { private: std::atomic_long finishedRequests_{0}; - std::unique_ptr graphStorageClient_; + std::unique_ptr storageClient_; std::unique_ptr mClient_; std::shared_ptr threadPool_; GraphSpaceID spaceId_; From 5b754e58ed03ac1ab3fa08a455c77c5d1b975bcc Mon Sep 17 00:00:00 2001 From: "vince.wu" <451943+kikimo@users.noreply.github.com> Date: Fri, 26 Nov 2021 12:50:24 +0800 Subject: [PATCH 51/53] add rpc to query raft status (#3336) * add rpc to query raft status * correct format erroro * unify raft Role and Status * make raft role and status enum strict Co-authored-by: Yee <2520865+yixinglu@users.noreply.github.com> Co-authored-by: Doodle <13706157+critical27@users.noreply.github.com> --- src/interface/raftex.thrift | 32 
++++++++++++++++++++++++++++ src/kvstore/raftex/RaftPart.cpp | 12 +++++++++++ src/kvstore/raftex/RaftPart.h | 18 ++++------------ src/kvstore/raftex/RaftexService.cpp | 10 +++++++++ src/kvstore/raftex/RaftexService.h | 2 ++ 5 files changed, 60 insertions(+), 14 deletions(-) diff --git a/src/interface/raftex.thrift b/src/interface/raftex.thrift index 4c7dde896a9..51000f86af7 100644 --- a/src/interface/raftex.thrift +++ b/src/interface/raftex.thrift @@ -7,6 +7,21 @@ namespace cpp nebula.raftex include "common.thrift" +enum Role { + LEADER = 1, // the leader + FOLLOWER = 2; // following a leader + CANDIDATE = 3; // Has sent AskForVote request + LEARNER = 4; // same with FOLLOWER, except that it does + // not vote in leader election +} (cpp.enum_strict) + +enum Status { + STARTING = 0; // The part is starting, not ready for service + RUNNING = 1; // The part is running + STOPPED = 2; // The part has been stopped + WAITING_SNAPSHOT = 3; // Waiting for the snapshot. +} (cpp.enum_strict) + enum ErrorCode { SUCCEEDED = 0; @@ -155,9 +170,26 @@ struct SendSnapshotResponse { 1: ErrorCode error_code; } +struct GetStateRequest { + 1: GraphSpaceID space; // Graphspace ID + 2: PartitionID part; // Partition ID +} + +struct GetStateResponse { + 1: ErrorCode error_code; + 2: Role role; + 3: TermID term; + 4: bool is_leader; + 5: LogID committed_log_id; + 6: LogID last_log_id; + 7: TermID last_log_term; + 8: Status status; +} + service RaftexService { AskForVoteResponse askForVote(1: AskForVoteRequest req); AppendLogResponse appendLog(1: AppendLogRequest req); SendSnapshotResponse sendSnapshot(1: SendSnapshotRequest req); HeartbeatResponse heartbeat(1: HeartbeatRequest req) (thread = 'eb'); + GetStateResponse getState(1: GetStateRequest req); } diff --git a/src/kvstore/raftex/RaftPart.cpp b/src/kvstore/raftex/RaftPart.cpp index de2aa7692c0..8dd3aaad96a 100644 --- a/src/kvstore/raftex/RaftPart.cpp +++ b/src/kvstore/raftex/RaftPart.cpp @@ -1005,6 +1005,18 @@ bool 
RaftPart::prepareElectionRequest(cpp2::AskForVoteRequest& req, return true; } +void RaftPart::getState(cpp2::GetStateResponse& resp) { + std::lock_guard g(raftLock_); + resp.set_term(term_); + resp.set_role(role_); + resp.set_is_leader(role_ == Role::LEADER); + resp.set_error_code(cpp2::ErrorCode::SUCCEEDED); + resp.set_committed_log_id(committedLogId_); + resp.set_last_log_id(lastLogId_); + resp.set_last_log_term(lastLogTerm_); + resp.set_status(status_); +} + bool RaftPart::processElectionResponses(const RaftPart::ElectionResponses& results, std::vector> hosts, TermID proposedTerm, diff --git a/src/kvstore/raftex/RaftPart.h b/src/kvstore/raftex/RaftPart.h index dfcf818a12a..346ab53cee4 100644 --- a/src/kvstore/raftex/RaftPart.h +++ b/src/kvstore/raftex/RaftPart.h @@ -196,6 +196,8 @@ class RaftPart : public std::enable_shared_from_this { * Methods to process incoming raft requests * ****************************************************/ + void getState(cpp2::GetStateResponse& resp); + // Process the incoming leader election request void processAskForVoteRequest(const cpp2::AskForVoteRequest& req, cpp2::AskForVoteResponse& resp); @@ -236,20 +238,8 @@ class RaftPart : public std::enable_shared_from_this { std::shared_ptr> clientMan, std::shared_ptr diskMan); - enum class Status { - STARTING = 0, // The part is starting, not ready for service - RUNNING, // The part is running - STOPPED, // The part has been stopped - WAITING_SNAPSHOT // Waiting for the snapshot. 
- }; - - enum class Role { - LEADER = 1, // the leader - FOLLOWER, // following a leader - CANDIDATE, // Has sent AskForVote request - LEARNER // It is the same with FOLLOWER, - // except it does not participate in leader election - }; + using Status = cpp2::Status; + using Role = cpp2::Role; const char* idStr() const { return idStr_.c_str(); } diff --git a/src/kvstore/raftex/RaftexService.cpp b/src/kvstore/raftex/RaftexService.cpp index 982b807c7e6..32e40d1340a 100644 --- a/src/kvstore/raftex/RaftexService.cpp +++ b/src/kvstore/raftex/RaftexService.cpp @@ -171,6 +171,16 @@ std::shared_ptr RaftexService::findPart(GraphSpaceID spaceId, Partitio return it->second; } +void RaftexService::getState(cpp2::GetStateResponse& resp, const cpp2::GetStateRequest& req) { + auto part = findPart(req.get_space(), req.get_part()); + if (part != nullptr) { + part->getState(resp); + } else { + resp.set_term(-1); + resp.set_error_code(cpp2::ErrorCode::E_UNKNOWN_PART); + } +} + void RaftexService::askForVote(cpp2::AskForVoteResponse& resp, const cpp2::AskForVoteRequest& req) { auto part = findPart(req.get_space(), req.get_part()); if (!part) { diff --git a/src/kvstore/raftex/RaftexService.h b/src/kvstore/raftex/RaftexService.h index c79c8a8b68c..664a11c3347 100644 --- a/src/kvstore/raftex/RaftexService.h +++ b/src/kvstore/raftex/RaftexService.h @@ -38,6 +38,8 @@ class RaftexService : public cpp2::RaftexServiceSvIf { void askForVote(cpp2::AskForVoteResponse& resp, const cpp2::AskForVoteRequest& req) override; + void getState(cpp2::GetStateResponse& resp, const cpp2::GetStateRequest& req) override; + void appendLog(cpp2::AppendLogResponse& resp, const cpp2::AppendLogRequest& req) override; void sendSnapshot(cpp2::SendSnapshotResponse& resp, From 4060baec8159eefba649cc2838cb79aa77a5d910 Mon Sep 17 00:00:00 2001 From: jimingquan Date: Fri, 26 Nov 2021 13:39:21 +0800 Subject: [PATCH 52/53] report error when use agg in where clause (#3355) --- src/graph/validator/YieldValidator.cpp | 20 
++++++++++--------- tests/tck/features/yield/yield.IntVid.feature | 10 ++++++++++ tests/tck/features/yield/yield.feature | 10 ++++++++++ 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/src/graph/validator/YieldValidator.cpp b/src/graph/validator/YieldValidator.cpp index 1dea01a5910..901774e8697 100644 --- a/src/graph/validator/YieldValidator.cpp +++ b/src/graph/validator/YieldValidator.cpp @@ -146,17 +146,19 @@ Status YieldValidator::validateYieldAndBuildOutputs(const YieldClause *clause) { return Status::OK(); } -Status YieldValidator::validateWhere(const WhereClause *clause) { - Expression *filter = nullptr; - if (clause != nullptr) { - filter = clause->filter(); +Status YieldValidator::validateWhere(const WhereClause *where) { + if (where == nullptr) { + return Status::OK(); } - if (filter != nullptr) { - NG_RETURN_IF_ERROR(deduceProps(filter, exprProps_)); - auto foldRes = ExpressionUtils::foldConstantExpr(filter); - NG_RETURN_IF_ERROR(foldRes); - filterCondition_ = foldRes.value(); + auto filter = where->filter(); + if (graph::ExpressionUtils::findAny(filter, {Expression::Kind::kAggregate})) { + return Status::SemanticError("`%s', not support aggregate function in where sentence.", + filter->toString().c_str()); } + NG_RETURN_IF_ERROR(deduceProps(filter, exprProps_)); + auto foldRes = ExpressionUtils::foldConstantExpr(filter); + NG_RETURN_IF_ERROR(foldRes); + filterCondition_ = foldRes.value(); return Status::OK(); } diff --git a/tests/tck/features/yield/yield.IntVid.feature b/tests/tck/features/yield/yield.IntVid.feature index 7e0772a9d96..dcddc93b084 100644 --- a/tests/tck/features/yield/yield.IntVid.feature +++ b/tests/tck/features/yield/yield.IntVid.feature @@ -437,6 +437,16 @@ Feature: Yield Sentence Then the result should be, in any order: | AVG($var.age) | SUM($var.like) | COUNT(*) | | 34.666666666666664 | 270 | 3 | + When executing query: + """ + GO FROM hash("Tim Duncan") OVER like YIELD like._dst as id, $$ as dst | YIELD $-.dst where 
count($-.id) > 2 + """ + Then a SemanticError should be raised at runtime: `(count($-.id)>2)', not support aggregate function in where sentence. + When executing query: + """ + $var = go from hash("Tim Duncan") over like yield like._dst as id, $$ as dst; yield $var.dst where count($var.id) > 2 + """ + Then a SemanticError should be raised at runtime: `(count($var.id)>2)', not support aggregate function in where sentence. Scenario: EmptyInput When executing query: diff --git a/tests/tck/features/yield/yield.feature b/tests/tck/features/yield/yield.feature index 254609ba9a7..30a62d02fac 100644 --- a/tests/tck/features/yield/yield.feature +++ b/tests/tck/features/yield/yield.feature @@ -467,6 +467,16 @@ Feature: Yield Sentence """ Then the result should be, in any order, with relax comparison: | name | + When executing query: + """ + GO FROM "Tim Duncan" OVER like YIELD like._dst as id, $$ as dst | YIELD $-.dst where count($-.id) > 2 + """ + Then a SemanticError should be raised at runtime: `(count($-.id)>2)', not support aggregate function in where sentence. + When executing query: + """ + $var = go from "Tim Duncan" over like yield like._dst as id, $$ as dst; yield $var.dst where count($var.id) > 2 + """ + Then a SemanticError should be raised at runtime: `(count($var.id)>2)', not support aggregate function in where sentence. 
Scenario: DuplicateColumn When executing query: From 53c3c2f4807dcad9d1f0ce2727901e939e006a1b Mon Sep 17 00:00:00 2001 From: "endy.li" <25311962+heroicNeZha@users.noreply.github.com> Date: Fri, 26 Nov 2021 14:04:23 +0800 Subject: [PATCH 53/53] fix - tck contains is invalid (#3314) * fix - tck contains is invalid * test - Modify test cases that failed after bug-fix * test - compate in release * format --- tests/common/comparator.py | 1 + tests/tck/features/admin/Hosts.feature | 6 +++--- tests/tck/features/index/TagEdgeIndex.feature | 12 ++++-------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/tests/common/comparator.py b/tests/common/comparator.py index b17e8018f4b..b77870fe4f4 100644 --- a/tests/common/comparator.py +++ b/tests/common/comparator.py @@ -49,6 +49,7 @@ def s(self, b: bytes) -> str: def _whether_return(self, cmp: bool) -> bool: return ((self._contains == CmpType.EQUAL and not cmp) + or (self._contains == CmpType.CONTAINS and not cmp) or (self._contains == CmpType.NOT_CONTAINS and cmp)) def compare(self, resp: DataSet, expect: DataSet): diff --git a/tests/tck/features/admin/Hosts.feature b/tests/tck/features/admin/Hosts.feature index cc176c72d0f..2a70092f1e9 100644 --- a/tests/tck/features/admin/Hosts.feature +++ b/tests/tck/features/admin/Hosts.feature @@ -17,21 +17,21 @@ Feature: Admin hosts """ Then the result should contain: | Host | Port | Status | Role | Git Info Sha | Version | - | /\w+/ | /\d+/ | "ONLINE" | "GRAPH" | /[0-9a-f]{7}/ | EMPTY | + | /\w+/ | /\d+/ | "ONLINE" | "GRAPH" | /[0-9a-f]{7}/ | /.*/ | When executing query: """ SHOW HOSTS META; """ Then the result should contain: | Host | Port | Status | Role | Git Info Sha | Version | - | /\w+/ | /\d+/ | "ONLINE" | "META" | /[0-9a-f]{7}/ | EMPTY | + | /\w+/ | /\d+/ | "ONLINE" | "META" | /[0-9a-f]{7}/ | /.*/ | When executing query: """ SHOW HOSTS STORAGE; """ Then the result should contain: | Host | Port | Status | Role | Git Info Sha | Version | - | /\w+/ | /\d+/ | "ONLINE" 
| "STORAGE" | /[0-9a-f]{7}/ | EMPTY | + | /\w+/ | /\d+/ | "ONLINE" | "STORAGE" | /[0-9a-f]{7}/ | /.*/ | Scenario: Create space When executing query: diff --git a/tests/tck/features/index/TagEdgeIndex.feature b/tests/tck/features/index/TagEdgeIndex.feature index bed05641186..2d958badd44 100644 --- a/tests/tck/features/index/TagEdgeIndex.feature +++ b/tests/tck/features/index/TagEdgeIndex.feature @@ -96,10 +96,8 @@ Feature: tag and edge index tests from pytest SHOW TAG INDEX STATUS """ Then the result should contain: - | Name | Index Status | - | 'single_tag_index' | 'FINISHED' | - | 'multi_tag_index' | 'FINISHED' | - | 'disorder_tag_index' | 'FINISHED' | + | Name | Index Status | + | 'single_tag_index,multi_tag_index,disorder_tag_index' | 'FINISHED' | When executing query: """ LOOKUP ON tag_1 WHERE tag_1.col2 == 18 YIELD tag_1.col1 @@ -333,10 +331,8 @@ Feature: tag and edge index tests from pytest SHOW EDGE INDEX STATUS """ Then the result should contain: - | Name | Index Status | - | 'single_edge_index' | 'FINISHED' | - | 'multi_edge_index' | 'FINISHED' | - | 'disorder_edge_index' | 'FINISHED' | + | Name | Index Status | + | 'single_edge_index,multi_edge_index,disorder_edge_index' | 'FINISHED' | # Lookup When executing query: """