Enable Paddle FastSpeech2 model #23311

Merged
22 changes: 22 additions & 0 deletions src/frontends/paddle/src/op/round.cpp
@@ -0,0 +1,22 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs round(const NodeContext& node) {
return node.default_single_output_mapping(
{std::make_shared<default_opset::Round>(node.get_input("X"),
ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO)},
{"Out"});
}

} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
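As context for the mapping above: paddle.round rounds halfway cases away from zero (like C's std::round), while OpenVINO's Round op also offers half-to-even, which is why the converter pins RoundMode::HALF_AWAY_FROM_ZERO explicitly. A minimal numpy sketch of the semantics being matched (illustrative only, not part of the diff):

import numpy as np

def round_half_away_from_zero(x):
    # np.round rounds halves to even (banker's rounding); floor(x + 0.5) for
    # x >= 0 and ceil(x - 0.5) for x < 0 reproduces half-away-from-zero.
    return np.where(x >= 0, np.floor(x + 0.5), np.ceil(x - 0.5))

print(round_half_away_from_zero(np.array([-2.5, -0.5, 0.5, 2.5])))  # [-3. -1.  1.  3.]
print(np.round(np.array([-2.5, -0.5, 0.5, 2.5])))                   # [-2. -0.  0.  2.]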
61 changes: 42 additions & 19 deletions src/frontends/paddle/src/op/set_value.cpp
@@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <limits>

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

@@ -20,11 +22,11 @@ std::shared_ptr<Node> handle_minus_index(const std::vector<int64_t>& node, const
return new_node;
}

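// Paddle encodes an omitted slice end (e.g. x[::2]) as INT32_MAX; replace
// that sentinel with the actual dimension size carried in update_node.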
std::shared_ptr<Node> handle_maximum_index(Output<Node>& node, const Output<Node>& update_node) {
const auto maximum_node = default_opset::Constant::create(element::i64, {1}, {std::numeric_limits<int32_t>::max()});
const auto mask = std::make_shared<default_opset::Equal>(node, maximum_node);
return std::make_shared<default_opset::Select>(mask, update_node, node);
}

bool is_contain_minus(const std::vector<int64_t> vec) {
for (int64_t i : vec) {
@@ -52,14 +54,14 @@ NamedOutputs set_value(const NodeContext& node) {
// Given:
// input_data: shape(5, 6, 7, 8, 9)
// update_value: shape(1, 6, 3, 3)
// operation: input_data[:, :, 2: 7: 2, -4: -1, :] = update_value
// axes = [2, 3]
// starts = [2, -4]
// ends = [7, -1]
// steps = [2, 1]
// Our process is:
// 1. Get axes [2, 3], get shape of input [5, 6, 7, 8, 9], select dimension from shape by axes: [7, 8].
// 2. Get starts [2, -4] and ends [7, -1]. Process minus starts and ends. starts: [2, 4], ends: [7, 7].
// 3. Calculate starts_node, ends_node and steps_node
// 1. Create `starts node` filled with 0. Update `starts` to `starts_node` according to axes.
// starts_node[axes[i]] = starts[i] for i in axes.size
@@ -92,25 +94,41 @@ NamedOutputs set_value(const NodeContext& node) {
const auto slice_shape = default_opset::Constant::create(ov::element::i64, {1, 1}, {-1});

// get positive starts ends and steps
if (node.has_input("StartsTensorList") && node.has_input("StepsTensorList") && node.has_input("EndsTensorList")) {
if (node.has_input("StartsTensorList")) {
starts = handle_minus_index(node.get_ng_inputs("StartsTensorList"), spec_dim_node);
} else if (node.has_attribute("starts")) {
auto start_vec = node.get_attribute<std::vector<int64_t>>("starts");
if (is_contain_minus(start_vec)) {
PADDLE_OP_CHECK(node, (false), "Currently not support minus start!");
}
starts = handle_minus_index(start_vec, spec_dim_node);
} else
PADDLE_OP_CHECK(node, (false), "Invalid arguments!");

if (node.has_input("EndsTensorList")) {
ends = handle_minus_index(node.get_ng_inputs("EndsTensorList"), spec_dim_node);
} else if (node.has_attribute("ends")) {
auto ends_vec = node.get_attribute<std::vector<int64_t>>("ends");
if (is_contain_minus(ends_vec)) {
PADDLE_OP_CHECK(node, (false), "Currently not support minus ends!");
}
ends = handle_minus_index(ends_vec, spec_dim_node);
} else
PADDLE_OP_CHECK(node, (false), "Invalid arguments!");

if (node.has_input("StepsTensorList")) {
steps = handle_minus_index(node.get_ng_inputs("StepsTensorList"), spec_dim_node);
} else if (node.has_attribute("steps")) {
auto step_vec = node.get_attribute<std::vector<int64_t>>("steps");
if (is_contain_minus(step_vec)) {
PADDLE_OP_CHECK(node, (false), "Currently not support minus steps!");
}
steps = handle_minus_index(step_vec, spec_dim_node);
} else
PADDLE_OP_CHECK(node, (false), "Invalid arguments!");

// for unspecified end: x[::2], end will be 2147483647
ends = handle_maximum_index(ends, spec_dim_node);

// 3.1 get starts node
starts_node =
@@ -142,7 +160,12 @@ NamedOutputs set_value(const NodeContext& node) {
std::make_shared<default_opset::ScatterNDUpdate>(input_shape, axes_node, value_shape_update_node);

// 4.5 broadcast
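// Broadcast with an explicit axes mapping (0..rank(value)-1) so the value's
// dimensions align with the leading axes of the target shape, rather than the
// right-aligned numpy-style default.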
auto value_shape = std::make_shared<default_opset::ShapeOf>(value_node);
auto value_rank = std::make_shared<default_opset::ShapeOf>(value_shape);
auto value_rank_scalar = std::make_shared<default_opset::Squeeze>(value_rank);
Output<Node> broadcast_axes =
std::make_shared<default_opset::Range>(zero_node, value_rank_scalar, one_node, element::i64);
value_node = std::make_shared<default_opset::Broadcast>(value_node, value_target_shape, broadcast_axes);

// get total number of elements
const auto numel_node = std::make_shared<default_opset::ReduceProd>(input_shape, zero_node);
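The index normalisation is the functional core of this change: Paddle stores a slice bound either as a negative index or, when the bound is omitted, as INT32_MAX (2147483647), and both forms must be resolved against the concrete dimension sizes before the update is scattered in. A rough numpy analogue of handle_minus_index/handle_maximum_index applied to the ends tensor (a sketch; dims stands in for spec_dim_node, the sizes of the sliced axes):

import numpy as np

INT32_MAX = np.iinfo(np.int32).max

def normalize_ends(ends, dims):
    ends = np.asarray(ends, dtype=np.int64)
    dims = np.asarray(dims, dtype=np.int64)
    ends = np.where(ends < 0, ends + dims, ends)    # handle_minus_index: -1 -> 7 for dim 8
    return np.where(ends == INT32_MAX, dims, ends)  # handle_maximum_index: sentinel -> dim size

print(normalize_ends([7, -1], [7, 8]))   # [7 7], matching the worked example in the code comments
print(normalize_ends([INT32_MAX], [7]))  # [7], the unspecified-end case exercised by set_value8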
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
@@ -98,6 +98,7 @@ OP_CONVERTER(reshape2);
OP_CONVERTER(reverse);
OP_CONVERTER(rnn);
OP_CONVERTER(roi_align);
OP_CONVERTER(round);
OP_CONVERTER(scale);
OP_CONVERTER(select_input);
OP_CONVERTER(set_value);
@@ -233,6 +234,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"reverse", op::reverse},
{"rnn", op::rnn},
{"roi_align", op::roi_align},
{"round", op::round},
{"scale", op::scale},
{"select_input", op::select_input},
{"set_value", op::set_value},
2 changes: 2 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
@@ -490,6 +490,7 @@ static const std::vector<std::string> models{
std::string("rnn_lstm_layer_2_bidirectional_seq_len_4/rnn_lstm_layer_2_bidirectional_seq_len_4.pdmodel"),
std::string("roi_align_test"),
std::string("roi_align_test2"),
std::string("round"),
std::string("scale_bias_after_float32"),
std::string("scale_bias_after_int32"),
std::string("scale_bias_after_int64"),
@@ -505,6 +506,7 @@
std::string("set_value5"),
// std::string("set_value6"),
// std::string("set_value7"),
// std::string("set_value8"),
// std::string("set_value_dynamic1"),
std::string("set_value_dynamic2"),
std::string("shape"),
@@ -0,0 +1,40 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#
# round paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle
import sys

data_type = 'float32'

def paddle_round(name: str, x):
paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
data = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.round(data)

cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
fetch_list=[out])

saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]

def main():
x = np.random.uniform(-1000, 1000, (8, 24, 32)).astype(data_type)

paddle_round("round", x)

if __name__ == "__main__":
main()
@@ -10,6 +10,7 @@
import paddle
from save_model import saveModel

maxint32 = np.iinfo(np.int32).max

def concat(data):
data = [np.expand_dims(d, 0) for d in data]
@@ -141,6 +142,21 @@ def set_value5(x, value, *slice):

# paddle_set_value("set_value7", data, value, set_value_step1, dtype)

shape = (7, 9)
dtype = "int32"
data = np.random.randint(0, 5, shape).astype(dtype)
value = np.random.randint(-100, -1, (3, 1)).astype(dtype)

starts = generate_data([4], np.int64)
ends = generate_data([maxint32], np.int64)
steps = generate_data([1], np.int64)

def set_value8(x, value, *slice):
x[build_slice(*slice)] = value
return x

paddle_set_value("set_value8", data, value, set_value8, dtype, starts, ends, steps)

# shape = (10, 5)
# dtype = "float32"
# data = np.random.randint(0, 5, shape).astype(dtype)
@@ -167,4 +183,4 @@ def set_value7(x, value, *slice):
paddle_set_value("set_value_dynamic2", data, value, set_value7, dtype, starts, ends, steps, is_dynamic=True)

if __name__ == "__main__":
main()