# [GPU] Conv should not extend strides and dilations for 3d output (#26415)

### Details:
 - *Conv should not extend strides and dilations for 3d output*
 - *Updated not to call get_dims() for dynamic shapes*

### Tickets:
 - *150680*
kelvinchoi-intel authored Oct 10, 2024
1 parent 3236bec commit ebeee18
Showing 4 changed files with 140 additions and 3 deletions.
#### src/plugins/intel_gpu/src/graph/layout_optimizer.cpp (4 changes: 2 additions & 2 deletions)

```diff
@@ -445,7 +445,7 @@ bool should_use_winograd_2x3_s1(const convolution_node& node,
 
     auto prim = node.get_primitive();
     if (input_layout.data_type != data_types::f16
-        || input_layout.feature() % 64 != 0 // current algorithm is effective for ifm to be a multiple of 64
+        || (input_layout.is_static() && input_layout.feature() % 64 != 0) // current algorithm is effective for ifm to be a multiple of 64
         || weights_layout.spatial(0) != 3 // weights have to be 3x3 by definition
         || weights_layout.spatial(1) != 3 // weights have to be 3x3 by definition
        || weights_layout.batch() % 64 != 0 // current algorithm is effective for ofm to be a multiple of 64
@@ -525,7 +525,7 @@ bool layout_optimizer::convolution_byxf_opt(const layout& input_layout,
         all_ones(conv->dilation) &&
         !node.get_transposed() &&
         node.get_groups() == 1 &&
-        input_layout.feature() % 32 == 0 &&
+        (input_layout.is_static() && input_layout.feature() % 32 == 0) &&
         weights_layout.spatial(1) == 1 && output_layout.feature() % 64 == 0 &&
         weights_layout.batch() % 64 == 0 &&
         all_ones(conv->stride) &&
```
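Both guards rely on `&&` short-circuiting: `feature()` (the concrete-dimension query behind the `get_dims()` call mentioned in the Details) is now only evaluated when the layout is static. A minimal sketch of the pattern, with a hypothetical `layout_stub` standing in for the plugin's layout class:

```cpp
#include <cassert>
#include <optional>

// Hypothetical stand-in for the plugin's layout type; an empty feature_dim
// models a dynamic shape whose concrete dimensions must not be queried.
struct layout_stub {
    std::optional<int> feature_dim;
    bool is_static() const { return feature_dim.has_value(); }
    int feature() const { return *feature_dim; }  // only valid when static
};

// Mirrors the guarded condition: feature() is reached only for static
// layouts, so a dynamic shape can never trigger the invalid query.
bool feature_not_multiple_of(const layout_stub& l, int m) {
    return l.is_static() && l.feature() % m != 0;
}

int main() {
    assert(feature_not_multiple_of({65}, 64));   // static: 65 % 64 != 0
    assert(!feature_not_multiple_of({}, 64));    // dynamic: short-circuits
}
```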
#### src/plugins/intel_gpu/src/plugin/ops/convolution.cpp (2 changes: 1 addition & 1 deletion)

```diff
@@ -44,7 +44,7 @@ static void CreateConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::int
     auto pads_end = op->get_pads_end();
     auto auto_pad = op->get_auto_pad();
 
-    if (!op->is_dynamic()) {
+    if (!op->is_dynamic() && !p.use_new_shape_infer()) {
         // Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
         strides.resize(std::max<size_t>(2, strides.size()), 1);
         dilations.resize(std::max<size_t>(2, strides.size()), 1);
```
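With new shape inference enabled, a rank-3 ("1D") convolution now keeps its single-element strides and dilations instead of having them padded to `{1, 1}`. For reference, a sketch of the 1D output-length arithmetic those parameters feed into, using the values from the new test instance below (the helper is illustrative, not plugin code):

```cpp
#include <cstddef>
#include <iostream>

// Standard 1d convolution output length:
// out = (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1
size_t conv1d_out(size_t in, size_t kernel, size_t stride,
                  size_t dilation, size_t pad_begin, size_t pad_end) {
    return (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
    // {1, 256, 1} input, kernel {1}, stride {1}, dilation {1}, pads {0}:
    // the spatial length stays 1, so with 192 output channels the result
    // is {1, 192, 1}.
    std::cout << conv1d_out(1, 1, 1, 1, 0, 0) << "\n";  // prints 1
}
```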
#### GPU single-layer convolution test instances (19 additions)

```diff
@@ -14,6 +14,25 @@ const std::vector<ov::element::Type> netPrecisions = {
     ov::element::f16
 };
 
+/* ============= 1D Convolution ============= */
+const auto conv1DParams = ::testing::Combine(
+        ::testing::Values(std::vector<size_t>({1})),        // kernel
+        ::testing::Values(std::vector<size_t>({1})),        // stride
+        ::testing::Values(std::vector<ptrdiff_t>({0})),     // pads_begin
+        ::testing::Values(std::vector<ptrdiff_t>({0})),     // pads_end
+        ::testing::Values(std::vector<size_t>({1})),        // dilation
+        ::testing::Values(192),                             // output channels
+        ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Convolution1D, ConvolutionLayerTest,
+        ::testing::Combine(
+                conv1DParams,
+                ::testing::Values(ov::element::f32),
+                ::testing::Values(ov::test::static_partial_shapes_to_test_representation(std::vector<ov::PartialShape>({{1, 256, 1}}))),
+                ::testing::Values(ov::test::utils::DEVICE_GPU)),
+        ConvolutionLayerTest::getTestCaseName);
+
 /* ============= 2D Convolution ============= */
 const std::vector<std::vector<size_t >> kernels = {{3, 3},
                                                    {3, 5}};
```
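For context, the same 1D convolution can be built directly against the public OpenVINO C++ API; the shapes below mirror the instance above, and compiling for `"GPU"` exercises the `CreateConvolutionOp` path changed in this commit. A self-contained sketch (zero-filled weights, illustrative only):

```cpp
#include <memory>
#include <vector>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/core.hpp"

int main() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32,
                                                         ov::Shape{1, 256, 1});
    auto weights = ov::op::v0::Constant::create(ov::element::f32,
                                                ov::Shape{192, 256, 1},
                                                std::vector<float>(192 * 256, 0.0f));
    auto conv = std::make_shared<ov::op::v1::Convolution>(
        input, weights,
        ov::Strides{1},         // 1d stride, no longer padded to {1, 1}
        ov::CoordinateDiff{0},  // pads_begin
        ov::CoordinateDiff{0},  // pads_end
        ov::Strides{1});        // dilation
    auto model = std::make_shared<ov::Model>(ov::OutputVector{conv},
                                             ov::ParameterVector{input});

    ov::Core core;
    auto compiled = core.compile_model(model, "GPU");
    return 0;
}
```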
#### New test: static 1D convolution feeding a dynamic concat (new file, 118 additions)

```cpp
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "shared_test_classes/single_op/convolution.hpp"

#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/node_builders/convolution.hpp"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/node_builders/constant.hpp"
#include "common_test_utils/node_builders/fake_quantize.hpp"

#include "openvino/op/parameter.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/fake_quantize.hpp"

namespace {
using ov::test::InputShape;

typedef std::tuple<
        std::vector<InputShape>,   // input shape
        ov::element::Type,         // network precision
        std::string                // device name
> convStaticConcatDynamicGPUTestDynamicParamsSet;

class ConvStaticConcatDynamicGPUTestDynamic : public testing::WithParamInterface<convStaticConcatDynamicGPUTestDynamicParamsSet>,
                                              virtual public ov::test::SubgraphBaseTest {
public:
    static std::string getTestCaseName(const testing::TestParamInfo<convStaticConcatDynamicGPUTestDynamicParamsSet>& obj) {
        std::vector<InputShape> inputShape;
        ov::element::Type model_type;
        std::string targetDevice;

        convStaticConcatDynamicGPUTestDynamicParamsSet basicParamsSet = obj.param;
        std::tie(inputShape, model_type, targetDevice) = basicParamsSet;

        std::ostringstream result;
        result << "IS_Dynamic=";
        result << ov::test::utils::partialShape2str({inputShape[0].first}) << "_";
        for (const auto& actual_shape : {inputShape[0].second}) {
            result << ov::test::utils::partialShape2str({actual_shape[0]}) << "_";
        }
        result << "IS_Static=";
        result << ov::test::utils::partialShape2str({inputShape[1].first}) << "_";
        for (const auto& actual_shape : {inputShape[1].second}) {
            result << ov::test::utils::partialShape2str({actual_shape[0]}) << "_";
        }
        result << "model_type=" << model_type << "_";
        result << "targetDevice=" << targetDevice;
        return result.str();
    }

protected:
    void SetUp() override {
        std::vector<InputShape> inputShape;
        ov::element::Type model_type;
        convStaticConcatDynamicGPUTestDynamicParamsSet basicParamsSet = this->GetParam();
        std::tie(inputShape, model_type, targetDevice) = basicParamsSet;

        init_input_shapes(inputShape);

        ov::ParameterVector inputParams;
        for (auto&& shape : inputDynamicShapes)
            inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));

        // Constant weight
        auto sh0 = inputShape[0].first[1].get_length();
        auto sh1 = inputShape[1].first[1].get_length();
        ov::PartialShape inShape1 = {sh0, sh1, 1};
        auto tensor1 = ov::test::utils::create_and_fill_tensor(model_type, inShape1.to_shape());
        std::shared_ptr<ov::Node> constantWeightOp = std::make_shared<ov::op::v0::Constant>(tensor1);
        constantWeightOp->set_friendly_name("constantWeight");

        // Static convolution
        auto convolutionOp = ov::test::utils::make_convolution(inputParams[1], constantWeightOp, model_type,
                                                               {3}, {1}, {0}, {0}, {1}, ov::op::PadType::EXPLICIT, 1);
        convolutionOp->set_friendly_name("convolution");

        // Dynamic Concat
        const auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({inputParams[0], convolutionOp}), 2);

        // Function
        auto makeFunction = [](const ov::element::Type& ngPrc, ov::ParameterVector& params, const std::shared_ptr<ov::Node>& lastNode) {
            ov::ResultVector results;

            for (size_t i = 0; i < lastNode->get_output_size(); i++)
                results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));

            return std::make_shared<ov::Model>(results, params, "Concat");
        };
        function = makeFunction(model_type, inputParams, concat);
    }
};

TEST_P(ConvStaticConcatDynamicGPUTestDynamic, Inference) {
    run();
}

const std::vector<std::vector<ov::test::InputShape>> dynInputShapes1D = {
    {
        {{1, 192, ov::Dimension::dynamic()}, {{1, 192, 1}}},
        {{1, 256, 1}, {{1, 256, 1}}},
    },
    {
        {{1, 32, ov::Dimension::dynamic()}, {{1, 32, 1}}},
        {{1, 48, 1}, {{1, 48, 1}}},
    },
};

INSTANTIATE_TEST_SUITE_P(smoke_static_conv_n_dynamic_concat, ConvStaticConcatDynamicGPUTestDynamic,
                         ::testing::Combine(::testing::ValuesIn(dynInputShapes1D),
                                            ::testing::Values(ov::element::f16),
                                            ::testing::Values(ov::test::utils::DEVICE_GPU)),
                         ConvStaticConcatDynamicGPUTestDynamic::getTestCaseName);

}  // namespace
```
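The shape pairing is the point of the test: the static 1D convolution output must stay rank 3 so the axis-2 concat against the dynamic branch remains valid. A small standalone check of that compatibility, using only the public `PartialShape` API:

```cpp
#include <cassert>

#include "openvino/core/partial_shape.hpp"

int main() {
    // Dynamic branch of the concat: {1, 192, ?}
    ov::PartialShape dyn_in{1, 192, ov::Dimension::dynamic()};
    // Static 1d convolution output: {1, 192, 1}
    ov::PartialShape conv_out{1, 192, 1};

    // Concat on axis 2 needs matching ranks and compatible non-axis dims;
    // this only holds while the conv output keeps its rank-3 layout.
    assert(dyn_in.rank().get_length() == conv_out.rank().get_length());
    assert(dyn_in[0].compatible(conv_out[0]));
    assert(dyn_in[1].compatible(conv_out[1]));
    return 0;
}
```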
