[TF FE] Support Bincount operation (#23418)
### Details:
 - implemented Bincount op
 - simple test cases

### Tickets:
#22071

---------

Co-authored-by: Roman Kazantsev <roman.kazantsev@intel.com>
chux0519 and rkazants authored Mar 15, 2024
1 parent 217afee commit c208a88
Showing 5 changed files with 147 additions and 1 deletion.
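
For context on the operation being converted, `tf.raw_ops.Bincount(arr, size, weights)` returns a 1-D tensor of length `size` in which bin `i` holds the number of occurrences of value `i` in `arr`, or the sum of the corresponding `weights` when a non-empty weights tensor is supplied. A minimal sketch (values are illustrative, not taken from the test suite):

```python
import tensorflow as tf

arr = tf.constant([1, 1, 3, 0], dtype=tf.int32)
size = tf.constant(4, dtype=tf.int32)

# Empty weights tensor: each occurrence counts as 1 (output takes the weights dtype).
counts = tf.raw_ops.Bincount(arr=arr, size=size, weights=tf.constant([], dtype=tf.float32))
# counts -> [1., 2., 0., 1.]

# Non-empty weights: occurrences are summed with their per-element weights.
weights = tf.constant([0.5, 0.5, 2.0, 1.0], dtype=tf.float32)
weighted = tf.raw_ops.Bincount(arr=arr, size=size, weights=weights)
# weighted -> [1., 1., 0., 2.]
```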
2 changes: 1 addition & 1 deletion src/frontends/tensorflow/docs/supported_ops.md
@@ -139,7 +139,7 @@ A "supported operation" is one that TensorFlow Frontend can convert to the OpenV
| BiasAdd | YES | |
| BiasAddGrad | NO | |
| BiasAddV1 | NO | |
| Bincount | NO | |
| Bincount | YES | |
| Bitcast | NO | |
| BitwiseAnd | YES | |
| BitwiseOr | YES | |
1 change: 1 addition & 0 deletions src/frontends/tensorflow/src/op_table.cpp
Expand Up @@ -218,6 +218,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
{"BroadcastTo", CreatorFunction(translate_broadcast_to_op)},
{"Bucketize", CreatorFunction(translate_bucketize_op)},
{"BiasAdd", CreatorFunction(translate_bias_add_op)},
{"Bincount", CreatorFunction(translate_bincount_op)},
{"Cast", CreatorFunction(translate_cast_op)},
{"CheckNumerics", CreatorFunction(translate_identity_op)},
{"CheckNumericsV2", CreatorFunction(translate_identity_op)},
1 change: 1 addition & 0 deletions src/frontends/tensorflow_common/include/common_op_table.hpp
@@ -42,6 +42,7 @@ OP_CONVERTER(translate_batch_mat_mul_op);
OP_CONVERTER(translate_batch_mat_mul_with_type_op);
OP_CONVERTER(translate_batch_to_space_nd_op);
OP_CONVERTER(translate_bias_add_op);
OP_CONVERTER(translate_bincount_op);
OP_CONVERTER(translate_broadcast_args_op);
OP_CONVERTER(translate_broadcast_to_op);
OP_CONVERTER(translate_bucketize_op);
79 changes: 79 additions & 0 deletions src/frontends/tensorflow_common/src/op/bincount.cpp
@@ -0,0 +1,79 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <memory>

#include "common_op_table.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/less.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/range.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/select.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/unsqueeze.hpp"

using namespace std;
using namespace ov;
using namespace ov::op;

namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {

OutputVector translate_bincount_op(const NodeContext& node) {
    default_op_checks(node, 3, {"Bincount"});
    auto arr = node.get_input(0);
    auto size = node.get_input(1);
    auto weights = node.get_input(2);

    auto scalar_shape = make_shared<v0::Constant>(element::i32, ov::Shape{0}, std::vector<int32_t>{});
    size = make_shared<v1::Reshape>(size, scalar_shape, false);

    auto weights_type = weights.get_element_type();

    // An empty weights input means unweighted counting: substitute a weight of 1 per element
    if (weights.get_partial_shape() == ov::Shape{0}) {
        auto arr_shape = make_shared<v3::ShapeOf>(arr, element::i32);
        weights = make_shared<v0::Constant>(weights_type, Shape{}, std::vector<int>{1});
        weights = make_shared<v3::Broadcast>(weights, arr_shape);
    }

    // Generate the range of bin indices [0, size)
    auto start = make_shared<v0::Constant>(element::i32, Shape{}, std::vector<int>{0});
    auto step = make_shared<v0::Constant>(element::i32, Shape{}, std::vector<int>{1});
    auto range = make_shared<v4::Range>(start, size, step, element::i32);

    // Reshape arr and weights to 1D tensors
    auto const_flatten_shape = make_shared<v0::Constant>(element::i32, Shape{1}, std::vector<int32_t>{-1});
    auto arr_reshaped = make_shared<v1::Reshape>(arr, const_flatten_shape, false);
    auto weights_reshaped = make_shared<v1::Reshape>(weights, const_flatten_shape, false);

    // Unsqueeze range to shape [size, 1] and unsqueeze arr and weights to shapes [1, num]
    auto const_axis_zero = make_shared<v0::Constant>(element::i32, Shape{1}, vector<int>({0}));
    auto const_axis_one = make_shared<v0::Constant>(element::i32, Shape{1}, vector<int>({1}));
    auto unsqueeze_range = make_shared<v0::Unsqueeze>(range, const_axis_one);
    auto unsqueeze_arr = make_shared<v0::Unsqueeze>(arr_reshaped, const_axis_zero);
    auto unsqueeze_weights = make_shared<v0::Unsqueeze>(weights_reshaped, const_axis_zero);

    // Generate a mask of shape [size, num] where mask[i][j] = (range[i] == arr[j])
    auto mask = make_shared<v1::Equal>(unsqueeze_range, unsqueeze_arr);
    // Weight the mask and sum over the input axis to get per-bin totals
    auto mask_casted = make_shared<v0::Convert>(mask, weights_type);

    auto to_sum = make_shared<v1::Multiply>(mask_casted, unsqueeze_weights);
    auto reduce_axis = make_shared<v0::Constant>(element::i32, Shape{}, 1);
    auto result = make_shared<v1::ReduceSum>(to_sum, reduce_axis);

    set_node_name(node.get_name(), result);

    return {result};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov
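
The translator above decomposes Bincount into existing opset operations rather than relying on a dedicated kernel: it builds the index range `[0, size)`, compares it against the flattened input with broadcasting to get a `[size, num]` mask, multiplies the mask by the weights, and reduces over the second axis. A minimal NumPy sketch of the same idea, assuming the weights have already been broadcast to the input shape as the code does for the empty-weights case:

```python
import numpy as np

def bincount_by_mask(arr, size, weights):
    # Range + Unsqueeze: column vector of candidate bin indices, shape [size, 1].
    idx = np.arange(size).reshape(size, 1)
    # Reshape + Unsqueeze: flattened input and weights as rows, shape [1, num].
    arr_row = arr.reshape(1, -1)
    w_row = weights.reshape(1, -1)
    # Equal with broadcasting: mask[i, j] is True where arr[j] == i, shape [size, num].
    mask = idx == arr_row
    # Convert + Multiply + ReduceSum over axis 1: weighted occurrence count per bin.
    return (mask.astype(w_row.dtype) * w_row).sum(axis=1)

arr = np.array([1, 1, 3, 0], dtype=np.int32)
weights = np.array([0.5, 0.5, 2.0, 1.0], dtype=np.float32)
print(bincount_by_mask(arr, 4, weights))  # [1. 1. 0. 2.]
```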
65 changes: 65 additions & 0 deletions tests/layer_tests/tensorflow_tests/test_tf_Bincount.py
@@ -0,0 +1,65 @@
# Copyright (C) 2018-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import platform

import numpy as np
import pytest
import tensorflow as tf
from common.tf_layer_test_class import CommonTFLayerTest

rng = np.random.default_rng()

class TestBincount(CommonTFLayerTest):
    def _prepare_input(self, inputs_info):
        assert 'x:0' in inputs_info, "Test error: inputs_info must contain `x`"
        x_shape = inputs_info['x:0']

        inputs_data = {}
        inputs_data['x:0'] = rng.integers(0, 8, x_shape).astype(np.int32)

        if 'w:0' in inputs_info:
            w_shape = inputs_info['w:0']
            inputs_data['w:0'] = rng.uniform(-2.0, 2.0, w_shape).astype(self.weights_type)

        return inputs_data

    def create_bincount_net(self, input_shape, size, weights, weights_type):
        tf.compat.v1.reset_default_graph()
        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(np.int32, input_shape, 'x')
            s = tf.constant(size)
            self.weights_type = weights_type
            if weights is not None:
                w = tf.compat.v1.placeholder(weights_type, input_shape, 'w')
            else:
                w = tf.constant([], dtype=weights_type)

            tf.raw_ops.Bincount(arr=x, size=s, weights=w)
            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        return tf_net, None

    test_data = [
        # with no weights
        dict(input_shape=[], size=1, weights=None, weights_type=np.float32),
        dict(input_shape=[2], size=2, weights=None, weights_type=np.float64),
        dict(input_shape=[1, 3], size=3, weights=None, weights_type=np.int32),
        dict(input_shape=[3, 1, 4], size=4, weights=None, weights_type=np.int64),

        # with weights
        dict(input_shape=[], size=1, weights=True, weights_type=np.float32),
        dict(input_shape=[2], size=2, weights=True, weights_type=np.float64),
        dict(input_shape=[1, 3], size=3, weights=True, weights_type=np.int32),
        dict(input_shape=[3, 1, 4], size=4, weights=True, weights_type=np.int64),
    ]

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.precommit_tf_fe
    @pytest.mark.nightly
    def test_bincount(self, params, ie_device, precision, ir_version, temp_dir):
        self._test(*self.create_bincount_net(**params),
                   ie_device, precision, ir_version, temp_dir=temp_dir)
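
When checking these layer tests by hand, `np.bincount` gives a convenient reference value (a sketch with illustrative inputs; `minlength` pads the result up to `size` and the slice keeps it from growing past `size`):

```python
import numpy as np

arr = np.array([0, 2, 2, 3], dtype=np.int32)
weights = np.array([1.0, 0.5, 0.5, 3.0], dtype=np.float32)
size = 4

reference = np.bincount(arr, weights=weights, minlength=size)[:size]
print(reference)  # [1. 0. 1. 3.]
```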
