
Commit

[PT FE] Fix issue when FakeQuantize is not inserted after regular operations (#19314)
mvafin authored Aug 22, 2023
1 parent 6eee51a commit 4882ccd
Showing 1 changed file with 7 additions and 6 deletions.
13 changes: 7 additions & 6 deletions src/frontends/pytorch/src/utils_quantize.hpp
@@ -154,12 +154,13 @@ template <OutputVector (*T)(const NodeContext&), size_t in_idx = 0, size_t out_idx = 0>
 OutputVector quantizable_op(const NodeContext& context) {
     auto translation_res = T(context);
     FRONT_END_OP_CONVERSION_CHECK(translation_res.size() > out_idx, "Not enough outputs to apply quantization.");
-    if (const auto quantized_pt_node = cast_quantized_fw_node(context.get_input(in_idx).get_node_shared_ptr())) {
-        return {context.mark_node(std::make_shared<QuantizedPtNode>(quantized_pt_node->get_type(),
-                                                                    translation_res[out_idx],
-                                                                    quantized_pt_node->get_scale(),
-                                                                    quantized_pt_node->get_zero_point(),
-                                                                    quantized_pt_node->get_dtype()))};
+    auto target_input = context.get_input(in_idx);
+    if (const auto quantized_pt_node = cast_quantized_fw_node(target_input.get_node_shared_ptr())) {
+        return {quantize(context,
+                         translation_res[out_idx],
+                         quantized_pt_node->get_scale(),
+                         quantized_pt_node->get_zero_point(),
+                         target_input)};
     }
     return translation_res;
 }
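For readers skimming the diff: the change routes re-quantization of a regular op's output through the quantize() helper instead of manually wrapping the result in a QuantizedPtNode. A standalone sketch of the wrapper's control flow after the change follows; every type and helper in it (Value, QuantInfo, requantize, translate_relu) is a simplified stand-in invented for illustration and is not the frontend's real API.

#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for quantization metadata carried by a value.
struct QuantInfo {
    float scale;
    int zero_point;
};

// Hypothetical stand-in for a graph value; `quant` is set if it was produced
// by a quantized op.
struct Value {
    std::string name;
    std::optional<QuantInfo> quant;
};

// Stand-in for the `quantize` helper: re-attaches the input's quantization
// parameters to the freshly translated output (conceptually, the point where
// the real frontend inserts the FakeQuantize subgraph).
Value requantize(const Value& out, const QuantInfo& q) {
    return Value{out.name + "/requantized", q};
}

// Mirrors the control flow of quantizable_op<>: run the regular translation,
// then, if the input was quantized, propagate its scale/zero-point onto the
// result; otherwise return the plain translation.
template <Value (*Translate)(const Value&)>
Value quantizable_op(const Value& input) {
    Value translated = Translate(input);
    if (input.quant) {
        return requantize(translated, *input.quant);
    }
    return translated;
}

// A trivial "regular operation" translator used only for this demo.
Value translate_relu(const Value& in) {
    return Value{in.name + "/relu", std::nullopt};
}

int main() {
    Value quantized_in{"x", QuantInfo{0.05f, 128}};
    Value out = quantizable_op<&translate_relu>(quantized_in);
    // Prints: x/relu/requantized scale=0.05
    std::cout << out.name << " scale=" << out.quant->scale << "\n";
}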
