
[CustomPass] add support for outputting the intermediate variables #55728

Merged (2 commits into PaddlePaddle:develop on Jul 27, 2023)

Conversation

ronny1996
Contributor

PR types

New features

PR changes

Others

Description

Support outputting intermediate variables, and support mapping N nodes in the match pattern to 1 node in the replace pattern.
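
The example below registers a custom pass whose match pattern spans two ops (concat + matmul_v2) and whose replace pattern is a single my_add_n op, assumed here to be provided by the custom NPU plugin loaded later in the script; the pattern's intermediate concat output is exposed as a pass output.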

import paddle
import numpy as np
from paddle.incubate.passes import ir

@paddle.incubate.passes.ir.RegisterPass
def generate_add_n():
    def pattern(x, y, cache):
        # Match a concat of the cached tensor with y, followed by a matmul_v2
        # that consumes the concat result.
        cached_y = ir.PassDesc.OP.concat(X=[cache, y])
        matmul_v2_op = ir.PassDesc.OP.matmul_v2
        matmul_v2_op.SetAttr("trans_x", False)
        matmul_v2_op.SetAttr("trans_y", False)
        # Expose the intermediate concat output alongside the matmul output.
        return cached_y, matmul_v2_op(X=x, Y=cached_y)

    def replace(x, y, cache):
        # Map both matched outputs onto the single custom my_add_n node.
        z = paddle.incubate.passes.ir.PassDesc.OP.my_add_n(X=x, Y=y, Z=cache)
        return z, z
    return pattern, replace
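
# Note: pattern returns two outputs (the intermediate cached_y plus the matmul
# result), while replace returns the single my_add_n output twice; this is the
# N-node match -> 1-node replace mapping added by this PR.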

@paddle.jit.to_static(input_spec=[paddle.static.InputSpec([2, 2], 'float32', 'x'),  paddle.static.InputSpec([2, 2], 'float32', 'y'), paddle.static.InputSpec([1, 1], 'int32', 'loop_cnt')])
def func(x, y, loop_cnt):
    
    cache_ = paddle.to_tensor([[1.0, 2.0],[1.0, 2.0]], dtype="float32")
    y = paddle.tensor.concat([cache_, y], axis=-1)
    result = paddle.matmul(x, y)

    i = paddle.to_tensor([0], dtype="int32")
    while i < paddle.squeeze(loop_cnt, 1):
        if cache_ is not None:
            y = paddle.tensor.concat([cache_, y], axis=-1)
        result = paddle.matmul(x, y)
        # cache_ = paddle.assign(y)
        cache_ = y
        paddle.increment(i)
    return result

# If the following function is used instead, the pass can match.
@paddle.jit.to_static(input_spec=[paddle.static.InputSpec([2, 2], 'float32', 'x'),  paddle.static.InputSpec([2, 2], 'float32', 'y')])
def func_without_while(x, y):
    cache_ = paddle.to_tensor([[1.0, 2.0],[1.0, 2.0]], dtype="float32")
    y = paddle.tensor.concat([cache_, y], axis=-1)
    result = paddle.matmul(x, y)
    return result

# Load the custom device plugin (environment-specific path); it is assumed to
# register the custom my_add_n operator used by the pass above.
paddle.utils.cpp_extension.extension_utils.load_op_meta_info_and_register_op('/opt/py37env/lib/python3.7/site-packages/paddle_custom_device/libpaddle-custom-npu.so')
print(func.concrete_program.main_program)
print(func_without_while.concrete_program.main_program)
model_file = './saved_models/func'
paddle.jit.save(func, model_file)

# inference
config = paddle.inference.Config()
config.set_prog_file(model_file + '.pdmodel')
config.enable_custom_device('npu')
config.enable_memory_optim()
pass_builder = config.pass_builder()
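# Append the custom pass registered above ('generate_add_n') to the inference
# IR pass pipeline.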
pass_builder.append_pass('generate_add_n')
# paddle.fluid.core.register_subgraph_pass("generate_add_n")
pass_builder.turn_on_debug()

print(pass_builder.all_passes())
predictor = paddle.inference.create_predictor(config)
np_inputs = [
            np.ones((2, 2)).astype("float32"),
            np.ones((2, 2)).astype("float32"),
            np.ones((1, 1)).astype("int32"),
        ]
input_names = predictor.get_input_names()
for i, name in enumerate(input_names):
    input_tensor = predictor.get_input_handle(name)
    input_tensor.copy_from_cpu(np_inputs[i])
# input_names = predictor.get_input_names()
# for i, name in enumerate(input_names):
#     input_tensor = predictor.get_input_handle(name)
#     input_tensor.copy_from_cpu(np.random.randn(2, 2).astype('float32'))

predictor.run()
results = []
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
    output_tensor = predictor.get_output_handle(name)
    output_data = output_tensor.copy_to_cpu()
    results.append(output_data)
print(results)
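
# A minimal follow-up sketch: the loop-free func_without_while is the graph the
# pass can actually match, and it can be saved and served through the same
# inference path (only the two float32 inputs are needed).
simple_model_file = './saved_models/func_without_while'
paddle.jit.save(func_without_while, simple_model_file)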

@paddle-bot (bot) commented on Jul 26, 2023

Your PR has been submitted. Thanks for your contribution!
Please wait for the result of CI first. See the Paddle CI Manual for details.

@qili93 (Contributor) left a comment:

LGTM

ronny1996 merged commit da25896 into PaddlePaddle:develop on Jul 27, 2023
ronny1996 deleted the custom_pass_multi_outputs branch on July 27, 2023 06:17
wz1qqx pushed a commit to wz1qqx/Paddle that referenced this pull request Jul 31, 2023
…addlePaddle#55728)

* add support for outputting the intermediate variables

* fix fuse_rresnet_unit
jinjidejinmuyan pushed a commit to jinjidejinmuyan/Paddle that referenced this pull request Aug 30, 2023
…addlePaddle#55728)

* add support for outputting the intermediate variables

* fix fuse_rresnet_unit