Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【PaddlePaddle Hackathon 4】add paddle set_value op #15888

Merged
merged 22 commits into from
Jun 5, 2023
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
106 changes: 106 additions & 0 deletions src/frontends/paddle/src/op/set_value.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

#define MAX_VALUE(T) std::numeric_limits<T>::max()

namespace ov {
namespace frontend {
namespace paddle {
namespace op {

// Returns a scalar Constant holding the maximum representable value of the
// given element type. It is used as a sentinel: padded regions around the
// written slice are filled with it and later detected via Equal.
// NOTE(review): if the user's value tensor genuinely contains this maximum,
// the sentinel comparison misclassifies those elements — confirm this is
// acceptable for the supported models.
std::shared_ptr<default_opset::Constant> get_max_value_by_dtype(ov::element::Type dtype) {
    if (dtype == element::f32)
        return default_opset::Constant::create(dtype, {}, {MAX_VALUE(float)});
    else if (dtype == element::f64)
        return default_opset::Constant::create(dtype, {}, {MAX_VALUE(double)});
    else if (dtype == element::i64)
        // Use the full int64 range so the sentinel cannot collide with values
        // that exceed int32 max (the previous fallback truncated to int max).
        return default_opset::Constant::create(dtype, {}, {MAX_VALUE(int64_t)});
    else
        return default_opset::Constant::create(dtype, {}, {MAX_VALUE(int)});
}

// Concatenate the 1-D index tensors along axis 0 and normalize negative
// indices: any entry < 0 is shifted by the corresponding dimension size.
Output<Node> handle_minus_index(const OutputVector& node, const Output<Node>& dim) {
    const auto concatenated = std::make_shared<default_opset::Concat>(node, 0);
    const auto zero_const = default_opset::Constant::create(element::i64, {1}, {0});
    const auto is_negative = std::make_shared<default_opset::Less>(concatenated, zero_const);
    const auto shifted = std::make_shared<default_opset::Add>(concatenated, dim);
    return std::make_shared<default_opset::Select>(is_negative, shifted, concatenated);
}

// Overload for compile-time index vectors: wrap them in an i64 Constant and
// normalize negative indices by adding the corresponding dimension size.
Output<Node> handle_minus_index(const std::vector<int64_t>& node, const Output<Node>& dim) {
    const auto index_const = default_opset::Constant::create(element::i64, {node.size()}, node);
    const auto zero_const = default_opset::Constant::create(element::i64, {1}, {0});
    const auto is_negative = std::make_shared<default_opset::Less>(index_const, zero_const);
    const auto shifted = std::make_shared<default_opset::Add>(index_const, dim);
    return std::make_shared<default_opset::Select>(is_negative, shifted, index_const);
}

// Convert Paddle's set_value op: writes `value` into the slice
// [starts:ends:steps] of `Input` along the attribute `axes`.
//
// Strategy (there is no direct "slice assignment" op):
//   1. Normalize starts/ends (negative indices become dim + index).
//   2. Broadcast `value` to the shape of the target slice.
//   3. Pad the broadcast value back out to the full input shape, filling the
//      padding with the dtype's maximum value as a sentinel.
//   4. Select: positions equal to the sentinel keep the original input,
//      all other positions take the padded value.
NamedOutputs set_value(const NodeContext& node) {
    const auto input_node = node.get_input("Input");
    auto value_node = node.get_input("ValueTensor");
    PADDLE_OP_CHECK(node, (input_node.get_partial_shape().rank().is_static()), "rank must be static");
    const auto dims = static_cast<int64_t>(input_node.get_partial_shape().rank().get_length());
    const auto dtype = input_node.get_element_type();
    const auto axes = node.get_attribute<std::vector<int64_t>>("axes");

    auto input_shape = std::make_shared<default_opset::ShapeOf>(input_node);

    Output<Node> padding_starts_node, padding_ends_node, starts, ends, steps;

    // Gather the sizes of the dimensions listed in `axes`.
    const auto axes_node = default_opset::Constant::create(element::i64, {axes.size(), 1}, axes);
    const auto spec_dim_node = std::make_shared<default_opset::GatherND>(input_shape, axes_node);

    // starts/ends/steps arrive either as runtime tensor lists or as attributes.
    if (node.has_input("StartsTensorList") && node.has_input("StepsTensorList") && node.has_input("EndsTensorList")) {
        starts = handle_minus_index(node.get_ng_inputs("StartsTensorList"), spec_dim_node);
        ends = handle_minus_index(node.get_ng_inputs("EndsTensorList"), spec_dim_node);
        // NOTE(review): `steps` is computed here but never consumed below, so
        // non-unit runtime steps are silently ignored — confirm whether a
        // runtime check (mirroring the attribute branch) is needed.
        steps = handle_minus_index(node.get_ng_inputs("StepsTensorList"), spec_dim_node);
    } else if (node.has_attribute("starts") && node.has_attribute("steps") && node.has_attribute("ends")) {
        starts = handle_minus_index(node.get_attribute<std::vector<int64_t>>("starts"), spec_dim_node);
        ends = handle_minus_index(node.get_attribute<std::vector<int64_t>>("ends"), spec_dim_node);
        auto step_vec = node.get_attribute<std::vector<int64_t>>("steps");
        // Only unit steps are representable by the pad-based emulation below.
        for (size_t i = 0; i < step_vec.size(); i++)
            PADDLE_OP_CHECK(node, (step_vec[i] == 1), "Elements of steps must be 1");
        steps = handle_minus_index(step_vec, spec_dim_node);
    } else {
        PADDLE_OP_CHECK(node, (false), "Invalid arguments!");
    }

    // Padding before the slice: `starts` on the specified axes, 0 elsewhere.
    padding_starts_node =
        default_opset::Constant::create(element::i64, {static_cast<size_t>(dims)}, std::vector<int64_t>(dims));
    padding_starts_node = std::make_shared<default_opset::ScatterNDUpdate>(padding_starts_node, axes_node, starts);

    // Padding after the slice: dim - ends on the specified axes, 0 elsewhere.
    padding_ends_node =
        default_opset::Constant::create(element::i64, {static_cast<size_t>(dims)}, std::vector<int64_t>(dims));
    const auto ends_update_node = std::make_shared<default_opset::Subtract>(spec_dim_node, ends);
    padding_ends_node =
        std::make_shared<default_opset::ScatterNDUpdate>(padding_ends_node, axes_node, ends_update_node);

    // Target slice shape: dim - ((dim - ends) + starts) == ends - starts on
    // the specified axes; every other axis keeps its full size.
    Output<Node> value_shape_update_node = std::make_shared<default_opset::Add>(ends_update_node, starts);
    value_shape_update_node = std::make_shared<default_opset::Subtract>(spec_dim_node, value_shape_update_node);
    const auto value_target_shape =
        std::make_shared<default_opset::ScatterNDUpdate>(input_shape, axes_node, value_shape_update_node);

    // Broadcast the value to the slice shape, then pad with the sentinel.
    value_node = std::make_shared<default_opset::Broadcast>(value_node, value_target_shape);

    const auto maximum_value = get_max_value_by_dtype(dtype);

    const auto padded_value = std::make_shared<default_opset::Pad>(value_node,
                                                                   padding_starts_node,
                                                                   padding_ends_node,
                                                                   maximum_value,
                                                                   ngraph::op::PadMode::CONSTANT);

    // Sentinel positions lie outside the slice — keep the original input there.
    const auto value_mask = std::make_shared<default_opset::Equal>(padded_value, maximum_value);

    return node.default_single_output_mapping(
        {std::make_shared<default_opset::Select>(value_mask, input_node, padded_value)},
        {"Out"});
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ OP_CONVERTER(rnn);
OP_CONVERTER(roi_align);
OP_CONVERTER(scale);
OP_CONVERTER(select_input);
OP_CONVERTER(set_value);
OP_CONVERTER(shape);
OP_CONVERTER(slice);
OP_CONVERTER(softmax);
Expand Down Expand Up @@ -205,6 +206,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"roi_align", op::roi_align},
{"scale", op::scale},
{"select_input", op::select_input},
{"set_value", op::set_value},
{"shape", op::shape},
{"slice", op::slice},
{"softmax", op::softmax},
Expand Down
7 changes: 7 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -446,6 +446,13 @@ static const std::vector<std::string> models{
std::string("scale_bias_before_int64"),
std::string("scale_tensor_bias_after"),
std::string("scale_tensor_bias_before"),
std::string("set_value1"),
std::string("set_value2"),
std::string("set_value3"),
std::string("set_value4"),
std::string("set_value5"),
std::string("set_value_dynamic1"),
std::string("set_value_dynamic2"),
std::string("shape"),
std::string("sigmoid"),
std::string("slice"),
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import sys

#
# set_value paddle model generator
#
import numpy as np
import paddle
from save_model import saveModel


def concat(data):
    """Stack the given arrays along a new leading axis (axis 0)."""
    expanded = [np.expand_dims(item, 0) for item in data]
    return np.concatenate(expanded, axis=0)


def paddle_set_value(name: str, x, value, callback, dtype, starts=None, ends=None, steps=None, is_dynamic=False):
    """Build, execute and save a paddle static-graph model exercising set_value.

    `callback` receives the graph nodes and performs the actual slice
    assignment; when `starts` is given, start/end/step tensors are also passed.
    `is_dynamic` replaces the value's static shape with -1 wildcards.
    """
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_x = paddle.static.data(name="x", shape=x.shape, dtype=dtype)
        if isinstance(value, (int, float)):
            value_shape = (0,)
        else:
            value_shape = value.shape
        if is_dynamic:
            value_shape = (-1,) * len(value_shape)
        node_v = paddle.static.data(name="v", shape=value_shape, dtype=dtype)
        exe = paddle.static.Executor(paddle.static.cpu_places(1)[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())
        feed = {"x": x, "v": value}
        inputs = [x, value]
        if starts is None:
            out = callback(paddle.clone(node_x), node_v)
        else:
            node_starts = paddle.assign(concat(starts))
            node_ends = paddle.assign(concat(ends))
            node_steps = paddle.assign(concat(steps))
            out = callback(paddle.clone(node_x), node_v, node_starts, node_ends, node_steps)

        outs = exe.run(feed=feed, fetch_list=[out])
        saveModel(name, exe, feedkeys=list(feed.keys()), fetchlist=[out],
                  inputs=inputs, outputs=[outs[0]], target_dir=sys.argv[1])


def build_slice(starts, ends, steps) -> list:
    """Turn parallel start/end/step sequences into a list of slice objects."""
    return [slice(begin, stop, stride) for begin, stop, stride in zip(starts, ends, steps)]


def generate_data(data, dtype):
    """Convert each element of `data` into a numpy array cast to `dtype`."""
    return [np.array(item).astype(dtype) for item in data]


def main():
    # Static-slice case, scalar-like value broadcast over the slice.
    shape = (2, 3, 4, 5)
    dtype = "float32"
    data = np.random.random(shape).astype(dtype)
    value = np.array([0]).astype(dtype)

    def set_value1(x, value):
        x[1:2, :, 2:4] = value
        return x

    paddle_set_value("set_value1", data, value, set_value1, dtype)

    # Value tensor with the exact slice shape (no broadcast needed).
    shape = (5, 3, 1)
    dtype = "float32"
    data = np.random.random(shape).astype("float32")
    value = np.random.random((3, 3, 1)).astype(dtype)

    def set_value2(x, value):
        x[2:5] = value
        return x

    paddle_set_value("set_value2", data, value, set_value2, dtype)

    # Negative indices on the last axis, int dtype.
    shape = (10, 2, 5)
    dtype = "int32"
    data = np.random.randint(0, 5, shape).astype(dtype)
    value = np.random.randint(0, 2, (10, 2, 3)).astype(dtype)

    def set_value3(x, value):
        x[:, :, -4:-1] = value
        return x

    paddle_set_value("set_value3", data, value, set_value3, dtype)

    # Runtime starts/ends/steps tensors with broadcasting of the value.
    shape = (10, 2, 5)
    dtype = "float32"
    data = np.random.randn(*shape).astype(dtype)
    value = np.random.randn(3, 1, 1).astype(dtype)
    starts = generate_data([-4, 0, 1], np.int64)
    ends = generate_data([-1, 1, 3], np.int64)
    steps = generate_data([1, 1, 1], np.int64)

    def set_value4(x, value, *slice):
        x[build_slice(*slice)] = value
        return x

    paddle_set_value("set_value4", data, value, set_value4, dtype, starts, ends, steps)

    # Runtime tensors on a 2-D input with a scalar-like value.
    shape = (10, 5)
    dtype = "int32"
    data = np.random.randint(0, 5, shape).astype(dtype)
    value = np.random.randint(0, 2, (1, )).astype(dtype)
    starts = generate_data([-4], np.int64)
    ends = generate_data([-1], np.int64)
    steps = generate_data([1], np.int64)

    def set_value5(x, value, *slice):
        x[build_slice(*slice)] = value
        return x

    paddle_set_value("set_value5", data, value, set_value5, dtype, starts, ends, steps)

    # Dynamic value shape (-1 wildcards), static slice.
    shape = (10, 5)
    dtype = "float32"
    data = np.random.randint(0, 5, shape).astype(dtype)
    value = np.random.randint(0, 2, (10, 3)).astype(dtype)

    def set_value6(x, value):
        x[:, -4:-1] = value
        return x

    paddle_set_value("set_value_dynamic1", data, value, set_value6, dtype, is_dynamic=True)

    # Dynamic value shape combined with runtime starts/ends/steps tensors.
    shape = (10, 5)
    dtype = "int32"
    data = np.random.randint(0, 5, shape).astype(dtype)
    value = np.random.randint(0, 2, (1, )).astype(dtype)
    starts = generate_data([-4], np.int64)
    ends = generate_data([-1], np.int64)
    steps = generate_data([1], np.int64)

    def set_value7(x, value, *slice):
        x[build_slice(*slice)] = value
        return x

    paddle_set_value("set_value_dynamic2", data, value, set_value7, dtype, starts, ends, steps, is_dynamic=True)

if __name__ == "__main__":
    main()