-
Notifications
You must be signed in to change notification settings - Fork 354
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[feat] Add support for argmax and argmin (#1312)
* [feat] Add support for argmax and argmin Adds support for aten::argmax and aten::argmin. Fixes # (issue) Please delete options that are not relevant and/or add your own. - Bug fix (non-breaking change which fixes an issue) - New feature (non-breaking change which adds functionality) - Breaking change (fix or feature that would cause existing functionality to not work as expected) - This change requires a documentation update - [ ] My code follows the style guidelines of this project (You can use the linters) - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas and hacks - [ ] I have made corresponding changes to the documentation - [ ] I have added tests to verify my fix or my feature - [ ] New and existing unit tests pass locally with my changes - [ ] I have added the relevant labels to my PR in so that relevant reviewers are notified * move max.cpp tests to test_max.cpp no functional change * fix permissions on max.cpp
- Loading branch information
1 parent
2af5942
commit 9db2852
Showing
4 changed files
with
241 additions
and
66 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,147 @@ | ||
#include <string> | ||
#include "core/compiler.h" | ||
#include "gtest/gtest.h" | ||
#include "tests/util/util.h" | ||
#include "torch/csrc/jit/ir/irparser.h" | ||
|
||
TEST(Converters, ATenMaxDimConvertsCorrectly) {
  // aten::max(self, dim, keepdim) with dim=0, keepdim=false.
  // Produces two outputs: the max values and the corresponding indices.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=0]()
      %3 : bool = prim::Constant[value=0]()
      %4 : Tensor, %5 : Tensor = aten::max(%x.1, %2, %3)
      return (%4, %5))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  // Both the values (output 0) and indices (output 1) must agree.
  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[1], trt_out[1].reshape_as(jit_out[1]), 2e-6));
}
|
||
TEST(Converters, ATenMinDimConvertsCorrectly) {
  // aten::min(self, dim, keepdim) with dim=0, keepdim=false.
  // Produces two outputs: the min values and the corresponding indices.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=0]()
      %3 : bool = prim::Constant[value=0]()
      %4 : Tensor, %5 : Tensor = aten::min(%x.1, %2, %3)
      return (%4, %5))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  // Both the values (output 0) and indices (output 1) must agree.
  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[1], trt_out[1].reshape_as(jit_out[1]), 2e-6));
}
|
||
TEST(Converters, ATenArgMaxConvertsCorrectly) {
  // aten::argmax(self, dim, keepdim) with dim=0, keepdim=false.
  // Single output: the indices of the maxima along dim 0.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=0]()
      %3 : bool = prim::Constant[value=0]()
      %4 : Tensor = aten::argmax(%x.1, %2, %3)
      return (%4))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
}
|
||
TEST(Converters, ATenArgMaxKeepdimConvertsCorrectly) {
  // aten::argmax(self, dim, keepdim) with dim=1, keepdim=true.
  // Exercises the keepdim path, where the reduced dim is retained as size 1.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=1]()
      %3 : bool = prim::Constant[value=1]()
      %4 : Tensor = aten::argmax(%x.1, %2, %3)
      return (%4))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
}
|
||
TEST(Converters, ATenArgMinConvertsCorrectly) {
  // aten::argmin(self, dim, keepdim) with dim=0, keepdim=false.
  // Single output: the indices of the minima along dim 0.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=0]()
      %3 : bool = prim::Constant[value=0]()
      %4 : Tensor = aten::argmin(%x.1, %2, %3)
      return (%4))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
}
|
||
TEST(Converters, ATenArgMinKeepdimConvertsCorrectly) {
  // aten::argmin(self, dim, keepdim) with dim=1, keepdim=true.
  // Exercises the keepdim path, where the reduced dim is retained as size 1.
  const auto src_ir = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=1]()
      %3 : bool = prim::Constant[value=1]()
      %4 : Tensor = aten::argmin(%x.1, %2, %3)
      return (%4))IR";

  auto parsed_graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(src_ir, parsed_graph.get());

  auto input = at::rand({2, 3, 5, 5}, {at::kCUDA});

  // Reference execution through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed_graph, params, {input});

  // Converted execution through a TensorRT engine. Params are rebuilt
  // before the second run, following the convention of the other
  // converter tests in this suite.
  params = torch_tensorrt::core::ir::get_static_params(parsed_graph->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed_graph, params, {input});

  ASSERT_TRUE(
      torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0].reshape_as(jit_out[0]), 2e-6));
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters