diff --git a/ngraph/core/include/ngraph/op/max_pool.hpp b/ngraph/core/include/ngraph/op/max_pool.hpp
index fcfbf69132d84d..28c77448410b4c 100644
--- a/ngraph/core/include/ngraph/op/max_pool.hpp
+++ b/ngraph/core/include/ngraph/op/max_pool.hpp
@@ -4,8 +4,9 @@
 
 #pragma once
 
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
+#include <limits>
+
+#include "ngraph/op/util/max_pool_base.hpp"
 
 namespace ngraph
 {
@@ -14,7 +15,7 @@ namespace ngraph
         namespace v1
         {
            /// \brief Batched max pooling operation.
-            class NGRAPH_API MaxPool : public Op
+            class NGRAPH_API MaxPool : public op::util::MaxPoolBase
            {
            public:
                NGRAPH_RTTI_DECLARATION;
@@ -29,7 +30,7 @@ namespace ngraph
                /// \param pads_begin The beginning of padding shape.
                /// \param pads_end The end of padding shape.
                /// \param kernel The kernel shape.
-                /// \param rounding_mode Whether to use ceiling or floor rounding type while
+                /// \param rounding_type Whether to use ceiling or floor rounding type while
                ///        computing output shape.
                /// \param auto_pad The pad type for automatically computing padding sizes.
                MaxPool(const Output<Node>& arg,
@@ -37,8 +38,8 @@ namespace ngraph
                        const Shape& pads_begin,
                        const Shape& pads_end,
                        const Shape& kernel,
-                        op::RoundingType rounding_mode = op::RoundingType::FLOOR,
-                        const PadType& auto_pad = op::PadType::EXPLICIT);
+                        const op::RoundingType rounding_type = op::RoundingType::FLOOR,
+                        const PadType auto_pad = op::PadType::EXPLICIT);
 
                bool visit_attributes(AttributeVisitor& visitor) override;
                void validate_and_infer_types() override;
@@ -46,27 +47,6 @@ namespace ngraph
                virtual std::shared_ptr<Node>
                    clone_with_new_inputs(const OutputVector& new_args) const override;
 
-                /// \return The kernel shape.
-                const Shape& get_kernel() const { return m_kernel; }
-                void set_kernel(const Shape& kernel) { m_kernel = kernel; }
-                /// \return The strides.
-                const Strides& get_strides() const { return m_strides; }
-                void set_strides(const Strides& strides) { m_strides = strides; }
-                /// \return The beginning of padding shape.
-                const Shape& get_pads_begin() const { return m_pads_begin; }
-                void set_pads_begin(const Shape& pads_begin) { m_pads_begin = pads_begin; }
-                /// \return The end of padding shape.
-                const Shape& get_pads_end() const { return m_pads_end; }
-                void set_adding_above(const Shape& pads_end) { m_pads_end = pads_end; }
-                /// \return The pad type for pooling.
-                const PadType& get_auto_pad() const { return m_auto_pad; }
-                void set_auto_pad(const PadType& auto_pad) { m_auto_pad = auto_pad; }
-                /// \return The ceiling mode being used for output shape computations
-                op::RoundingType get_rounding_type() const { return m_rounding_type; }
-                void set_rounding_type(op::RoundingType rounding_mode)
-                {
-                    m_rounding_type = rounding_mode;
-                }
                /// \return The default value for MaxPool.
                NGRAPH_SUPPRESS_DEPRECATED_START
                virtual std::shared_ptr<Node> get_default_value() const override;
@@ -76,21 +56,85 @@ namespace ngraph
                                      const HostTensorVector& inputs) const override;
                bool has_evaluate() const override;
 
-            protected:
-                Shape m_kernel;
-                Strides m_strides;
-                Shape m_pads_begin;
-                Shape m_pads_end;
-                PadType m_auto_pad;
-                op::RoundingType m_rounding_type;
-
            private:
-                bool update_auto_padding(const PartialShape& in_shape,
-                                         Shape& new_pads_end,
-                                         Shape& new_pads_begin) const;
                bool evaluate_maxpool(const HostTensorVector& outputs,
                                      const HostTensorVector& inputs) const;
            };
        } // namespace v1
+
+        namespace v8
+        {
+            /// \brief MaxPooling operation with values and indices calculated as individual outputs
+            class NGRAPH_API MaxPool : public op::util::MaxPoolBase
+            {
+            public:
+                NGRAPH_RTTI_DECLARATION;
+
+                /// \brief Constructs an empty MaxPool operation.
+                MaxPool() = default;
+
+                /// \brief Constructs a parametrized MaxPool operation.
+                ///
+                /// \param arg Output of a node producing the feature tensor to be pooled.
+                /// \param strides The strides of the pooling filter.
+                /// \param dilations The dilations of the pooling filter.
+                /// \param pads_begin Paddings at the beginning of each spatial axis.
+                /// \param pads_end Paddings at the end of each spatial axis.
+                /// \param kernel The kernel shape.
+                /// \param rounding_type Whether to use ceiling or floor rounding type while
+                ///        computing the output shape.
+                /// \param auto_pad The pad type for automatic calculation of the padding sizes.
+                /// \param index_element_type The data type used by the second output tensor
+                ///        containing the selected indices.
+                /// \param axis Indicates a dimension in the input data shape which should be used
+                ///        as a starting point for calculation of the upper bound of allowed
+                ///        values of the indices output.
+                /// \param pads_value The value stored in the padding cells.
+                MaxPool(const Output<Node>& arg,
+                        const Strides& strides,
+                        const Strides& dilations,
+                        const Shape& pads_begin,
+                        const Shape& pads_end,
+                        const Shape& kernel,
+                        const op::RoundingType rounding_type = op::RoundingType::FLOOR,
+                        const PadType auto_pad = op::PadType::EXPLICIT,
+                        const element::Type index_element_type = element::i64,
+                        const int64_t axis = 0,
+                        const float pads_value = -std::numeric_limits<float>::infinity());
+
+                bool visit_attributes(AttributeVisitor& visitor) override;
+                void validate_and_infer_types() override;
+
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                /// \return The pooling filter's dilations.
+                const Strides& get_dilations() const noexcept { return m_dilations; }
+                void set_dilations(const Strides& dilations) { m_dilations = dilations; }
+
+                /// \return The data type of the second output tensor (indices).
+                element::Type get_index_element_type() const noexcept
+                {
+                    return m_index_element_type;
+                }
+                void set_index_element_type(const element::Type index_element_type)
+                {
+                    m_index_element_type = index_element_type;
+                }
+
+                /// \return The 'axis' attribute value.
+                int64_t get_axis() const { return m_axis; }
+                void set_axis(const int64_t axis) { m_axis = axis; }
+
+                /// \return The value stored in the padding cells.
+                float get_pads_value() const { return m_pads_value; }
+                void set_pads_value(const float pads_value) { m_pads_value = pads_value; }
+
+            private:
+                Strides m_dilations;
+                element::Type m_index_element_type{element::i32};
+                int64_t m_axis{0};
+                float m_pads_value{-std::numeric_limits<float>::infinity()};
+            };
+        } // namespace v8
    } // namespace op
 } // namespace ngraph
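A minimal construction sketch of the new v8 op (not part of the diff; the shapes and attribute values are illustrative, the constructor signature comes from the header above):

```cpp
#include <memory>

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// A 2x2 window dilated by 2 has an effective span of 3, so a 13x13 spatial
// shape pools down to 11x11. Output 0 holds the max values; output 1 holds
// the indices of the selected elements (typed per index_element_type).
std::shared_ptr<op::v8::MaxPool> make_example_max_pool()
{
    const auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 13, 13});
    return std::make_shared<op::v8::MaxPool>(data,
                                             Strides{1, 1}, // strides
                                             Strides{2, 2}, // dilations
                                             Shape{0, 0},   // pads_begin
                                             Shape{0, 0},   // pads_end
                                             Shape{2, 2},   // kernel
                                             op::RoundingType::FLOOR,
                                             op::PadType::EXPLICIT,
                                             element::i64,  // index_element_type
                                             0);            // axis
}

// make_example_max_pool()->get_output_partial_shape(0) == {1, 3, 11, 11}
// make_example_max_pool()->get_output_partial_shape(1) == {1, 3, 11, 11}
```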
diff --git a/ngraph/core/include/ngraph/op/util/max_pool_base.hpp b/ngraph/core/include/ngraph/op/util/max_pool_base.hpp
new file mode 100644
index 00000000000000..e3029734997485
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/util/max_pool_base.hpp
@@ -0,0 +1,79 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace util
+        {
+            class NGRAPH_API MaxPoolBase : public Op
+            {
+            public:
+                NGRAPH_RTTI_DECLARATION;
+                MaxPoolBase() = default;
+
+                /// \param arg The node producing the input data batch tensor.
+                /// \param strides The strides.
+                /// \param pads_begin The beginning of padding shape.
+                /// \param pads_end The end of padding shape.
+                /// \param kernel The kernel shape.
+                /// \param rounding_type Whether to use ceiling or floor rounding type while
+                ///        computing output shape.
+                /// \param auto_pad The pad type for automatically computing padding sizes.
+                MaxPoolBase(const Output<Node>& arg,
+                            const Strides& strides,
+                            const Shape& pads_begin,
+                            const Shape& pads_end,
+                            const Shape& kernel,
+                            const op::RoundingType rounding_type = op::RoundingType::FLOOR,
+                            const PadType auto_pad = op::PadType::EXPLICIT);
+
+                void validate_and_infer_types() override;
+
+                /// \return The kernel shape.
+                const Shape& get_kernel() const { return m_kernel; }
+                void set_kernel(const Shape& kernel) { m_kernel = kernel; }
+                /// \return The strides.
+                const Strides& get_strides() const { return m_strides; }
+                void set_strides(const Strides& strides) { m_strides = strides; }
+                /// \return The beginning of padding shape.
+                const Shape& get_pads_begin() const { return m_pads_begin; }
+                void set_pads_begin(const Shape& pads_begin) { m_pads_begin = pads_begin; }
+                /// \return The end of padding shape.
+                const Shape& get_pads_end() const { return m_pads_end; }
+                void set_pads_end(const Shape& pads_end) { m_pads_end = pads_end; }
+                /// \return The pad type for pooling.
+                PadType get_auto_pad() const { return m_auto_pad; }
+                void set_auto_pad(const PadType auto_pad) { m_auto_pad = auto_pad; }
+                /// \return The rounding type used for output shape computations.
+                op::RoundingType get_rounding_type() const { return m_rounding_type; }
+                void set_rounding_type(op::RoundingType rounding_type)
+                {
+                    m_rounding_type = rounding_type;
+                }
+
+            protected:
+                bool update_auto_padding(const PartialShape& in_shape,
+                                         const Strides& filter_dilations,
+                                         Shape& new_pads_end,
+                                         Shape& new_pads_begin) const;
+
+                PartialShape infer_output_shape(const Strides& dilations);
+
+                Shape m_kernel;
+                Strides m_strides;
+                Shape m_pads_begin;
+                Shape m_pads_end;
+                PadType m_auto_pad;
+                op::RoundingType m_rounding_type;
+            };
+        } // namespace util
+    } // namespace op
+} // namespace ngraph
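The base class declared above takes over the attribute defaulting previously buried in `v1::MaxPool::validate_and_infer_types` (the implementation lives in max_pool_base.cpp further down). A hedged sketch of the observable behavior, assuming a 4D input:

```cpp
#include <cassert>
#include <memory>

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Sketch: empty strides/pads are legal and are filled in during validation
// based on the kernel rank, matching the defaulting logic in
// MaxPoolBase::validate_and_infer_types.
void default_attributes_sketch()
{
    const auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 32, 32});
    const auto mp =
        std::make_shared<op::v1::MaxPool>(data, Strides{}, Shape{}, Shape{}, Shape{2, 2});
    assert(mp->get_strides() == Strides({1, 1}));  // one stride per spatial axis
    assert(mp->get_pads_begin() == Shape({0, 0})); // zero padding by default
    assert(mp->get_pads_end() == Shape({0, 0}));
}
```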
diff --git a/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp
index 0f216b471d53b9..16ddf55d0bee29 100644
--- a/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp
+++ b/ngraph/core/include/ngraph/opsets/opset8_tbl.hpp
@@ -58,7 +58,6 @@ NGRAPH_OP(LogicalXor, ngraph::op::v1)
 NGRAPH_OP(LRN, ngraph::op::v0)
 NGRAPH_OP(LSTMCell, ngraph::op::v4)
 NGRAPH_OP(MatMul, ngraph::op::v0)
-NGRAPH_OP(MaxPool, ngraph::op::v1)
 NGRAPH_OP(Maximum, ngraph::op::v1)
 NGRAPH_OP(Minimum, ngraph::op::v1)
 NGRAPH_OP(Mod, ngraph::op::v1)
@@ -180,5 +179,6 @@ NGRAPH_OP(AdaptiveAvgPool, ngraph::op::v8)
 NGRAPH_OP(AdaptiveMaxPool, ngraph::op::v8)
 NGRAPH_OP(DeformableConvolution, ngraph::op::v8)
 NGRAPH_OP(MatrixNms, ngraph::op::v8)
+NGRAPH_OP(MaxPool, ngraph::op::v8)
 NGRAPH_OP(MulticlassNms, ngraph::op::v8)
 NGRAPH_OP(RandomUniform, ngraph::op::v8)
diff --git a/ngraph/core/include/ngraph/validation_util.hpp b/ngraph/core/include/ngraph/validation_util.hpp
index de81ebfd171bb8..7bb64867d79266 100644
--- a/ngraph/core/include/ngraph/validation_util.hpp
+++ b/ngraph/core/include/ngraph/validation_util.hpp
@@ -85,7 +85,8 @@ namespace ngraph
                                  const PartialShape& window_shape,
                                  const Strides& window_strides,
                                  bool is_window_all_in_padding_allowed,
-                                 bool ceil_mode = false);
+                                 bool ceil_mode = false,
+                                 const Strides& window_dilation = Strides{});
 
    NGRAPH_API
    std::tuple<element::Type, PartialShape, PartialShape>
diff --git a/ngraph/core/src/op/avg_pool.cpp b/ngraph/core/src/op/avg_pool.cpp
index f0375c54599e76..7fe28f2e9c37cc 100644
--- a/ngraph/core/src/op/avg_pool.cpp
+++ b/ngraph/core/src/op/avg_pool.cpp
@@ -147,7 +147,8 @@ void op::v1::AvgPool::validate_and_infer_types()
                                          m_kernel,
                                          m_strides,
                                          !m_exclude_pad,
-                                          m_rounding_type == op::RoundingType::CEIL)
+                                          m_rounding_type == op::RoundingType::CEIL,
+                                          Strides{}) // no dilation of the window
            : output_shape);
 }
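Passing an empty `Strides{}` for the new trailing parameter preserves pre-dilation behavior, because an empty value is expanded to all-ones dilations inside `infer_batched_pooling_forward` (see the validation_util.cpp hunk below). A hedged helper illustrating why all-ones dilation is the identity:

```cpp
#include <cstddef>

// Sketch: the "effective" span of a dilated pooling window along one spatial
// axis. With dilation == 1 it degenerates to the plain kernel size, which is
// why an empty window_dilation keeps the old output shapes.
constexpr std::size_t effective_window_span(std::size_t kernel, std::size_t dilation)
{
    return (kernel - 1) * dilation + 1;
}

static_assert(effective_window_span(3, 1) == 3, "undilated window");
static_assert(effective_window_span(3, 2) == 5, "3 taps sampled every 2nd cell");
```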
diff --git a/ngraph/core/src/op/max_pool.cpp b/ngraph/core/src/op/max_pool.cpp
index eb41510dabb1bb..680cac142bbe28 100644
--- a/ngraph/core/src/op/max_pool.cpp
+++ b/ngraph/core/src/op/max_pool.cpp
@@ -14,44 +14,16 @@
 using namespace std;
 using namespace ngraph;
 
-bool op::v1::MaxPool::update_auto_padding(const PartialShape& in_shape,
-                                          Shape& new_pads_end,
-                                          Shape& new_pads_begin) const
-{
-    bool update_auto_padding_succeed = true;
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
-    {
-        CoordinateDiff pads_end, pads_begin;
-        update_auto_padding_succeed =
-            try_apply_auto_padding(in_shape,
-                                   m_kernel,
-                                   m_strides,
-                                   Strides(m_kernel.size(), 1), // No dilation
-                                   m_auto_pad,
-                                   pads_end,
-                                   pads_begin);
-        new_pads_end = Shape(pads_end.begin(), pads_end.end());
-        new_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
-    }
-    return update_auto_padding_succeed;
-}
-
-NGRAPH_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1);
+NGRAPH_RTTI_DEFINITION(op::v1::MaxPool, "MaxPool", 1, op::util::MaxPoolBase);
 
 op::v1::MaxPool::MaxPool(const Output<Node>& arg,
                         const Strides& strides,
                         const Shape& pads_begin,
                         const Shape& pads_end,
                         const Shape& kernel,
-                         op::RoundingType rounding_type,
-                         const PadType& auto_pad)
-    : Op({arg})
-    , m_kernel(kernel)
-    , m_strides(strides)
-    , m_pads_begin(pads_begin)
-    , m_pads_end(pads_end)
-    , m_auto_pad(auto_pad)
-    , m_rounding_type(rounding_type)
+                         const op::RoundingType rounding_type,
+                         const PadType auto_pad)
+    : op::util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad)
 {
    constructor_validate_and_infer_types();
 }
@@ -71,96 +43,13 @@ bool ngraph::op::v1::MaxPool::visit_attributes(AttributeVisitor& visitor)
 void op::v1::MaxPool::validate_and_infer_types()
 {
    NGRAPH_OP_SCOPE(v1_MaxPool_validate_and_infer_types);
-    if (0 == m_strides.size())
-    {
-        m_strides = Strides(m_kernel.size(), 1);
-    }
-
-    if (0 == m_pads_begin.size())
-    {
-        m_pads_begin = Shape(m_kernel.size(), 0);
-    }
-
-    if (0 == m_pads_end.size())
-    {
-        m_pads_end = Shape(m_kernel.size(), 0);
-    }
-
-    const PartialShape& arg_shape = get_input_partial_shape(0);
 
-    NODE_VALIDATION_CHECK(this,
-                          arg_shape.rank().compatible(3) || arg_shape.rank().compatible(4) ||
-                              arg_shape.rank().compatible(5),
-                          "Expected a 3D, 4D or 5D tensor for the input. Got: ",
-                          arg_shape);
+    MaxPoolBase::validate_and_infer_types();
 
-    if (arg_shape.rank().is_static())
-    {
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_end.size()) ==
-                                  arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_end size to be equal to input size - 2. Got: ",
-                              m_pads_end.size());
+    const PartialShape output_shape =
+        infer_output_shape(Strides{}); // no dilations of the filter window
 
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_begin.size()) ==
-                                  arg_shape.rank().get_max_length() - 2,
-                              "Expected pads_begin size to be equal to input size - 2. Got: ",
-                              m_pads_begin.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_kernel.size()) ==
-                                  arg_shape.rank().get_max_length() - 2,
-                              "Expected kernel size to be equal to input size - 2. Got: ",
-                              m_kernel.size());
-        NODE_VALIDATION_CHECK(this,
-                              static_cast<int64_t>(m_pads_end.size()) ==
-                                  arg_shape.rank().get_max_length() - 2,
-                              "Expected strides size to be equal to input size - 2. Got: ",
-                              m_strides.size());
-    }
-
-    auto output_shape = PartialShape::dynamic();
-    if (arg_shape.rank().is_static())
-    {
-        output_shape =
-            std::vector<Dimension>(arg_shape.rank().get_max_length(), Dimension::dynamic());
-        if (arg_shape[0].is_static())
-        {
-            output_shape[0] = arg_shape[0]; // batch size
-        }
-        if (arg_shape[1].is_static())
-        {
-            output_shape[1] = arg_shape[1]; // channel size
-        }
-    }
-
-    bool update_auto_padding_succeed = true;
-    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
-    {
-        update_auto_padding_succeed = update_auto_padding(arg_shape, m_pads_end, m_pads_begin);
-    }
-    if (m_auto_pad == PadType::VALID)
-    {
-        m_pads_end = Shape(m_pads_end.size(), 0);
-        m_pads_begin = Shape(m_pads_begin.size(), 0);
-    }
-    // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
-    // now still take Shape (no negative padding).
-    CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
-    CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
-
-    set_output_type(0,
-                    get_input_element_type(0),
-                    update_auto_padding_succeed
-                        ? infer_batched_pooling_forward(this,
-                                                        arg_shape,
-                                                        pads_begin,
-                                                        pads_end,
-                                                        m_kernel,
-                                                        m_strides,
-                                                        true,
-                                                        m_rounding_type == op::RoundingType::CEIL)
-                        : output_shape);
+    set_output_type(0, get_input_element_type(0), output_shape);
 }
 
 shared_ptr<Node> op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const
@@ -237,7 +126,7 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
    auto arg_shape = inputs[0]->get_partial_shape();
    auto pads_begin_s = get_pads_begin();
    auto pads_end_s = get_pads_end();
-    update_auto_padding(arg_shape, pads_begin_s, pads_end_s);
+    update_auto_padding(arg_shape, Strides(m_kernel.size(), 1), pads_begin_s, pads_end_s);
    CoordinateDiff pads_begin(pads_begin_s.begin(), pads_begin_s.end());
    CoordinateDiff pads_end(pads_end_s.begin(), pads_end_s.end());
    auto out_shape = infer_batched_pooling_forward(this,
@@ -247,7 +136,8 @@ bool op::v1::MaxPool::evaluate_maxpool(const HostTensorVector& outputs,
                                                   get_kernel(),
                                                   get_strides(),
                                                   true,
-                                                   get_rounding_type() == op::RoundingType::CEIL);
+                                                   get_rounding_type() == op::RoundingType::CEIL,
+                                                   Strides{}); // no dilation of the window
 
    return maxpool::evaluate_maxpool(inputs[0],
                                     outputs[0],
@@ -266,7 +156,7 @@ bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs,
 
 bool op::v1::MaxPool::has_evaluate() const
 {
-    NGRAPH_OP_SCOPE(v0_Log_has_evaluate);
+    NGRAPH_OP_SCOPE(v1_MaxPool_has_evaluate);
    switch (get_input_element_type(0))
    {
    case ngraph::element::i32:
@@ -279,3 +169,78 @@ bool op::v1::MaxPool::has_evaluate() const
    return false;
 }
+
+// ------------------------------ V8 ------------------------------
+
+NGRAPH_RTTI_DEFINITION(op::v8::MaxPool, "MaxPool", 8, op::util::MaxPoolBase);
+
+op::v8::MaxPool::MaxPool(const Output<Node>& arg,
+                         const Strides& strides,
+                         const Strides& dilations,
+                         const Shape& pads_begin,
+                         const Shape& pads_end,
+                         const Shape& kernel,
+                         const op::RoundingType rounding_type,
+                         const PadType auto_pad,
+                         const element::Type index_element_type,
+                         const int64_t axis,
+                         const float pads_value)
+    : op::util::MaxPoolBase(arg, strides, pads_begin, pads_end, kernel, rounding_type, auto_pad)
+    , m_dilations{dilations}
+    , m_index_element_type{index_element_type}
+    , m_axis{axis}
+    , m_pads_value{pads_value}
+{
+    constructor_validate_and_infer_types();
+}
+
+bool ngraph::op::v8::MaxPool::visit_attributes(AttributeVisitor& visitor)
+{
+    NGRAPH_OP_SCOPE(v8_MaxPool_visit_attributes);
+    visitor.on_attribute("strides", m_strides);
+    visitor.on_attribute("dilations", m_dilations);
+    visitor.on_attribute("pads_begin", m_pads_begin);
+    visitor.on_attribute("pads_end", m_pads_end);
+    visitor.on_attribute("kernel", m_kernel);
+    visitor.on_attribute("rounding_type", m_rounding_type);
+    visitor.on_attribute("auto_pad", m_auto_pad);
+    visitor.on_attribute("index_element_type", m_index_element_type);
+    visitor.on_attribute("axis", m_axis);
+    visitor.on_attribute("pads_value", m_pads_value);
+    return true;
+}
+
+void op::v8::MaxPool::validate_and_infer_types()
+{
+    NGRAPH_OP_SCOPE(v8_MaxPool_validate_and_infer_types);
+
+    MaxPoolBase::validate_and_infer_types();
+
+    const auto input_shape = get_input_partial_shape(0);
+    if (input_shape.rank().is_static())
+    {
+        m_axis = ngraph::normalize_axis(this, m_axis, input_shape.rank());
+    }
+
+    const PartialShape output_shape = infer_output_shape(m_dilations);
+
+    set_output_type(0, get_input_element_type(0), output_shape);
+    set_output_type(1, m_index_element_type, output_shape);
+}
+
+shared_ptr<Node> op::v8::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    NGRAPH_OP_SCOPE(v8_MaxPool_clone_with_new_inputs);
+    check_new_args_count(this, new_args);
+    return make_shared<v8::MaxPool>(new_args.at(0),
+                                    m_strides,
+                                    m_dilations,
+                                    m_pads_begin,
+                                    m_pads_end,
+                                    m_kernel,
+                                    m_rounding_type,
+                                    m_auto_pad,
+                                    m_index_element_type,
+                                    m_axis,
+                                    m_pads_value);
+}
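The `axis` attribute only constrains the values of the indices output: a possibly negative axis is wrapped by `ngraph::normalize_axis` above, and per the `\param axis` doc comment the indices are bounded by the size of the slice that starts at that axis. A hedged sketch (the helper name is hypothetical, not part of the PR):

```cpp
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Sketch: number of elements from the normalized axis to the last dimension,
// i.e. the exclusive upper bound for values in the indices output.
int64_t indices_upper_bound(const std::vector<int64_t>& input_shape, int64_t axis)
{
    return std::accumulate(input_shape.begin() + axis,
                           input_shape.end(),
                           int64_t{1},
                           std::multiplies<int64_t>());
}

// indices_upper_bound({1, 3, 13, 13}, 0) == 507 -> indices address the whole tensor
// indices_upper_bound({1, 3, 13, 13}, 2) == 169 -> indices restart for every channel
```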
diff --git a/ngraph/core/src/op/util/max_pool_base.cpp b/ngraph/core/src/op/util/max_pool_base.cpp
new file mode 100644
index 00000000000000..9ac14aa376cd9c
--- /dev/null
+++ b/ngraph/core/src/op/util/max_pool_base.cpp
@@ -0,0 +1,157 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ngraph/op/util/max_pool_base.hpp"
+#include "itt.hpp"
+#include "ngraph/shape.hpp"
+
+#include <ngraph/validation_util.hpp>
+
+using namespace std;
+using namespace ngraph;
+
+NGRAPH_RTTI_DEFINITION(op::util::MaxPoolBase, "MaxPoolBase", 8);
+
+op::util::MaxPoolBase::MaxPoolBase(const Output<Node>& arg,
+                                   const Strides& strides,
+                                   const Shape& pads_begin,
+                                   const Shape& pads_end,
+                                   const Shape& kernel,
+                                   const op::RoundingType rounding_type,
+                                   const op::PadType auto_pad)
+    : Op({arg})
+    , m_kernel(kernel)
+    , m_strides(strides)
+    , m_pads_begin(pads_begin)
+    , m_pads_end(pads_end)
+    , m_auto_pad(auto_pad)
+    , m_rounding_type(rounding_type)
+{
+    constructor_validate_and_infer_types();
+}
+
+void op::util::MaxPoolBase::validate_and_infer_types()
+{
+    NGRAPH_OP_SCOPE(util_MaxPoolBase_validate_and_infer_types);
+
+    if (0 == m_strides.size())
+    {
+        m_strides = Strides(m_kernel.size(), 1);
+    }
+
+    if (0 == m_pads_begin.size())
+    {
+        m_pads_begin = Shape(m_kernel.size(), 0);
+    }
+
+    if (0 == m_pads_end.size())
+    {
+        m_pads_end = Shape(m_kernel.size(), 0);
+    }
+
+    const PartialShape& arg_shape = get_input_partial_shape(0);
+
+    NODE_VALIDATION_CHECK(this,
+                          arg_shape.rank().compatible(3) || arg_shape.rank().compatible(4) ||
+                              arg_shape.rank().compatible(5),
+                          "Expected a 3D, 4D or 5D tensor for the input. Got: ",
+                          arg_shape);
+
+    if (arg_shape.rank().is_static())
+    {
+        NODE_VALIDATION_CHECK(this,
+                              static_cast<int64_t>(m_pads_end.size()) ==
+                                  arg_shape.rank().get_max_length() - 2,
+                              "Expected pads_end size to be equal to input size - 2. Got: ",
+                              m_pads_end.size());
+
+        NODE_VALIDATION_CHECK(this,
+                              static_cast<int64_t>(m_pads_begin.size()) ==
+                                  arg_shape.rank().get_max_length() - 2,
+                              "Expected pads_begin size to be equal to input size - 2. Got: ",
+                              m_pads_begin.size());
+
+        NODE_VALIDATION_CHECK(this,
+                              static_cast<int64_t>(m_kernel.size()) ==
+                                  arg_shape.rank().get_max_length() - 2,
+                              "Expected kernel size to be equal to input size - 2. Got: ",
+                              m_kernel.size());
+
+        NODE_VALIDATION_CHECK(this,
+                              static_cast<int64_t>(m_strides.size()) ==
+                                  arg_shape.rank().get_max_length() - 2,
+                              "Expected strides size to be equal to input size - 2. Got: ",
+                              m_strides.size());
+    }
+}
+
+PartialShape op::util::MaxPoolBase::infer_output_shape(const Strides& dilations)
+{
+    NGRAPH_OP_SCOPE(util_MaxPoolBase_infer_output_shape);
+
+    const auto& arg_shape = get_input_partial_shape(0);
+
+    bool update_auto_padding_succeed = true;
+
+    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
+    {
+        const auto filter_dilations =
+            dilations.empty() ? Strides(m_kernel.size(), 1) : dilations;
+        update_auto_padding_succeed =
+            update_auto_padding(arg_shape, filter_dilations, m_pads_end, m_pads_begin);
+    }
+    if (m_auto_pad == PadType::VALID)
+    {
+        m_pads_end = Shape(m_pads_end.size(), 0);
+        m_pads_begin = Shape(m_pads_begin.size(), 0);
+    }
+
+    auto output_shape = PartialShape::dynamic();
+    if (update_auto_padding_succeed)
+    {
+        CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
+        CoordinateDiff pads_end(m_pads_end.begin(), m_pads_end.end());
+        output_shape = infer_batched_pooling_forward(this,
+                                                     get_input_partial_shape(0),
+                                                     pads_begin,
+                                                     pads_end,
+                                                     m_kernel,
+                                                     m_strides,
+                                                     true,
+                                                     m_rounding_type == op::RoundingType::CEIL,
+                                                     dilations);
+    }
+    else
+    {
+        if (arg_shape.rank().is_static())
+        {
+            output_shape =
+                std::vector<Dimension>(arg_shape.rank().get_max_length(), Dimension::dynamic());
+            if (arg_shape[0].is_static())
+            {
+                output_shape[0] = arg_shape[0]; // batch size
+            }
+            if (arg_shape[1].is_static())
+            {
+                output_shape[1] = arg_shape[1]; // channel size
+            }
+        }
+    }
+
+    return output_shape;
+}
+
+bool op::util::MaxPoolBase::update_auto_padding(const PartialShape& in_shape,
+                                                const Strides& filter_dilations,
+                                                Shape& new_pads_end,
+                                                Shape& new_pads_begin) const
+{
+    bool update_auto_padding_succeed = true;
+    if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
+    {
+        CoordinateDiff pads_end, pads_begin;
+        update_auto_padding_succeed = try_apply_auto_padding(
+            in_shape, m_kernel, m_strides, filter_dilations, m_auto_pad, pads_end, pads_begin);
+        new_pads_end = Shape(pads_end.begin(), pads_end.end());
+        new_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
+    }
+    return update_auto_padding_succeed;
+}
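A worked example of the SAME_UPPER branch above (hedged; it assumes the usual "output == ceil(input / stride)" contract of `try_apply_auto_padding` and a window that is at least one stride wide):

```cpp
#include <cstddef>

// Total padding required so that ceil(in / stride) positions of the (possibly
// dilated) window fit into the padded input; SAME_UPPER puts the extra cell,
// if any, into pads_end.
constexpr std::size_t same_upper_total_padding(std::size_t in,
                                               std::size_t stride,
                                               std::size_t window_span)
{
    return ((in + stride - 1) / stride - 1) * stride + window_span - in;
}

// in = 13, stride = 1, kernel = 3, dilation = 2 -> window span = (3-1)*2+1 = 5:
// total padding 4, split as pads_begin = 2 / pads_end = 2, output = 13 = ceil(13/1).
static_assert(same_upper_total_padding(13, 1, 5) == 4, "SAME_UPPER example");
```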
diff --git a/ngraph/core/src/validation_util.cpp b/ngraph/core/src/validation_util.cpp
index 1671892879a6f8..79b9b1588faa76 100644
--- a/ngraph/core/src/validation_util.cpp
+++ b/ngraph/core/src/validation_util.cpp
@@ -479,7 +479,8 @@ PartialShape ngraph::infer_batched_pooling_forward(const Node* node,
                                                   const PartialShape& window_shape,
                                                   const Strides& window_strides,
                                                   bool is_window_all_in_padding_allowed,
-                                                   bool ceil_mode)
+                                                   bool ceil_mode,
+                                                   const Strides& window_dilation)
 {
    NODE_VALIDATION_CHECK(node,
                          data_batch_shape.rank().is_dynamic() ||
@@ -536,7 +537,14 @@ PartialShape ngraph::infer_batched_pooling_forward(const Node* node,
        // For pooling ops we don't need dilation, so we fill in the identity value (all 1).
        Strides data_dilation(data_spatial_shape.rank().get_length(), 1);
-        Strides window_dilation(data_spatial_shape.rank().get_length(), 1);
+        Strides dilations = window_dilation;
+        // if the window_dilation was not specified, generate the default value (no dilations)
+        if (window_dilation.empty())
+        {
+            // dilations equal to 1 for each spatial axis mean that the window is not dilated
+            dilations = Strides(data_spatial_shape.rank().get_length(), 1);
+        }
+
        data_output_spatial_shape =
            infer_windowed_reduction_output_shape(node,
                                                  data_spatial_shape,
@@ -545,7 +553,7 @@ PartialShape ngraph::infer_batched_pooling_forward(const Node* node,
                                                  data_padding_above,
                                                  window_shape,
                                                  window_strides,
-                                                  window_dilation,
+                                                  dilations,
                                                  is_window_all_in_padding_allowed,
                                                  ceil_mode);
    }
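The expected shapes in the type_prop tests below can be checked against the floor-rounded pooling formula with a dilated window; a hedged compile-time sketch of that arithmetic:

```cpp
#include <cstddef>

// out = (in + pads_begin + pads_end - ((kernel - 1) * dilation + 1)) / stride + 1
// (floor rounding, matching op::RoundingType::FLOOR)
constexpr std::size_t pooled_dim(std::size_t in, std::size_t pb, std::size_t pe,
                                 std::size_t k, std::size_t d, std::size_t s)
{
    return (in + pb + pe - ((k - 1) * d + 1)) / s + 1;
}

static_assert(pooled_dim(13, 0, 0, 3, 1, 1) == 11, "3D, no dilations");
static_assert(pooled_dim(13, 0, 0, 3, 2, 1) == 9, "3D, dilation 2");
static_assert(pooled_dim(13, 1, 2, 3, 2, 1) == 12, "3D, dilation 2, pads 1/2");
static_assert(pooled_dim(13, 0, 0, 2, 3, 1) == 10, "4D, dilation 3 on one axis");
```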
diff --git a/ngraph/test/type_prop/max_pool.cpp b/ngraph/test/type_prop/max_pool.cpp
index 3c6391a52137ab..ac64f912d3fee9 100644
--- a/ngraph/test/type_prop/max_pool.cpp
+++ b/ngraph/test/type_prop/max_pool.cpp
@@ -157,3 +157,93 @@ TEST(type_prop, max_pool_default_values)
    ASSERT_EQ(mp->get_rounding_type(), op::RoundingType::FLOOR);
    ASSERT_EQ(mp->get_auto_pad(), op::PadType::EXPLICIT);
 }
+
+TEST(type_prop, max_pool_v8_3D_no_dilations)
+{
+    const PartialShape arg_shape{1, 7, 13};
+    const Strides strides{1};
+    const Strides dilations{1};
+    const Shape pads_begin{0};
+    const Shape pads_end{0};
+    const Shape kernel_shape{3};
+
+    const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    const auto mp = make_shared<op::v8::MaxPool>(
+        arg, strides, dilations, pads_begin, pads_end, kernel_shape);
+
+    const auto expected_output_shape = PartialShape({1, 7, 11});
+    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
+    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+}
+
+TEST(type_prop, max_pool_v8_3D_with_dilations)
+{
+    const PartialShape arg_shape{1, 7, 13};
+    const Strides strides{1};
+    const Strides dilations{2};
+    const Shape pads_begin{0};
+    const Shape pads_end{0};
+    const Shape kernel_shape{3};
+
+    const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    const auto mp = make_shared<op::v8::MaxPool>(
+        arg, strides, dilations, pads_begin, pads_end, kernel_shape);
+
+    const auto expected_output_shape = PartialShape({1, 7, 9});
+    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
+    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+}
+
+TEST(type_prop, max_pool_v8_3D_with_dilations_and_padding)
+{
+    const PartialShape arg_shape{1, 7, 13};
+    const Strides strides{1};
+    const Strides dilations{2};
+    const Shape pads_begin{1};
+    const Shape pads_end{2};
+    const Shape kernel_shape{3};
+
+    const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    const auto mp = make_shared<op::v8::MaxPool>(
+        arg, strides, dilations, pads_begin, pads_end, kernel_shape);
+
+    const auto expected_output_shape = PartialShape({1, 7, 12});
+    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
+    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+}
+
+TEST(type_prop, max_pool_v8_4D_no_dilations)
+{
+    const PartialShape arg_shape{1, 3, 13, 13};
+    const Strides strides{1, 1};
+    const Strides dilations{1, 1};
+    const Shape pads_begin{0, 0};
+    const Shape pads_end{0, 0};
+    const Shape kernel_shape{2, 2};
+
+    const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    const auto mp = make_shared<op::v8::MaxPool>(
+        arg, strides, dilations, pads_begin, pads_end, kernel_shape);
+
+    const auto expected_output_shape = PartialShape({1, 3, 12, 12});
+    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
+    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+}
+
+TEST(type_prop, max_pool_v8_4D_with_dilations)
+{
+    const PartialShape arg_shape{1, 3, 13, 13};
+    const Strides strides{1, 1};
+    const Strides dilations{2, 3};
+    const Shape pads_begin{0, 0};
+    const Shape pads_end{0, 0};
+    const Shape kernel_shape{2, 2};
+
+    const auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    const auto mp = make_shared<op::v8::MaxPool>(
+        arg, strides, dilations, pads_begin, pads_end, kernel_shape);
+
+    const auto expected_output_shape = PartialShape({1, 3, 11, 10});
+    ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(expected_output_shape));
+    ASSERT_TRUE(mp->get_output_partial_shape(1).same_scheme(expected_output_shape));
+}
diff --git a/ngraph/test/visitors/op/max_pool.cpp b/ngraph/test/visitors/op/max_pool.cpp
index 1f7847ed912cde..bbf41adc4726e6 100644
--- a/ngraph/test/visitors/op/max_pool.cpp
+++ b/ngraph/test/visitors/op/max_pool.cpp
@@ -7,9 +7,7 @@
 #include "ngraph/ngraph.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/opsets/opset1.hpp"
-#include "ngraph/opsets/opset3.hpp"
-#include "ngraph/opsets/opset4.hpp"
-#include "ngraph/opsets/opset5.hpp"
+#include "ngraph/opsets/opset8.hpp"
 
 #include "util/visitor.hpp"
 
@@ -42,3 +40,39 @@ TEST(attributes, max_pool_op)
    EXPECT_EQ(g_max_pool->get_rounding_type(), max_pool->get_rounding_type());
    EXPECT_EQ(g_max_pool->get_auto_pad(), max_pool->get_auto_pad());
 }
+
+TEST(attributes, max_pool_v8_op)
+{
+    NodeBuilder::get_ops().register_factory<opset8::MaxPool>();
+    const auto data = make_shared<op::Parameter>(element::i32, Shape{1, 3, 37, 37});
+
+    const auto strides = Strides{1, 1};
+    const auto dilations = Strides{1, 1};
+    const auto pads_begin = Shape{1, 1};
+    const auto pads_end = Shape{1, 1};
+    const auto kernel = Shape{2, 2};
+    const auto rounding_mode = op::RoundingType::CEIL;
+    const auto auto_pad = op::PadType::EXPLICIT;
+    const element::Type& index_element_type = element::i32;
+
+    const auto max_pool = make_shared<opset8::MaxPool>(data,
+                                                       strides,
+                                                       dilations,
+                                                       pads_begin,
+                                                       pads_end,
+                                                       kernel,
+                                                       rounding_mode,
+                                                       auto_pad,
+                                                       index_element_type);
+    NodeBuilder builder(max_pool);
+    auto g_max_pool = as_type_ptr<opset8::MaxPool>(builder.create());
+
+    EXPECT_EQ(g_max_pool->get_strides(), max_pool->get_strides());
+    EXPECT_EQ(g_max_pool->get_dilations(), max_pool->get_dilations());
+    EXPECT_EQ(g_max_pool->get_pads_begin(), max_pool->get_pads_begin());
+    EXPECT_EQ(g_max_pool->get_pads_end(), max_pool->get_pads_end());
+    EXPECT_EQ(g_max_pool->get_kernel(), max_pool->get_kernel());
+    EXPECT_EQ(g_max_pool->get_rounding_type(), max_pool->get_rounding_type());
+    EXPECT_EQ(g_max_pool->get_auto_pad(), max_pool->get_auto_pad());
+    EXPECT_EQ(g_max_pool->get_index_element_type(), max_pool->get_index_element_type());
+}