Dynamic graph Tensor / old IR Variable / new IR OpResult method support comparison table #58118

gouzil opened this issue Oct 16, 2023 · 1 comment
gouzil commented Oct 16, 2023

Note

Please do not modify the table manually.

It is used to track the current support status.

Based on: 5fb978f
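
For context, a method is marked as supported in a column when it appears in `dir()` of the corresponding class; a minimal sketch, following the generator script in the comment below:

```python
import paddle

# Check one method against the three classes compared in the table
# (the same classes the generator script uses).
method = "abs"
print(method in dir(paddle.Tensor))                        # dynamic graph Tensor
print(method in dir(paddle.base.framework.Variable))       # old IR Variable
print(method in dir(paddle.base.libpaddle.pir.OpResult))   # new IR OpResult
```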

math | dynamic graph Tensor | old IR Variable | new IR OpResult
T #60515
__add__ #58106
__and__ #58343
__array__
__array_ufunc__
__bool__
__class__
__deepcopy__
__delattr__
__dict__
__dir__
__div__ #57997
__doc__
__eq__ #58896
__float__
__floordiv__ #58219
__format__
__ge__ #58343
__getattribute__
__getitem__
__getstate__
__gt__ #58343
__hash__
__index__
__init__
__init_subclass__
__int__
__invert__ #58343
__le__ #58343
__len__
__long__
__lt__ #58343
__matmul__ #58219
__mod__ #58219
__module__
__mul__ #58106
__ne__ #58343
__neg__ #60166
__new__
__nonzero__
__or__ #58343
__pow__ #58219
__radd__ #58106
__rdiv__ #57997
__reduce__
__reduce_ex__
__repr__
__rmul__ #58106
__rpow__ #58219
__rsub__ #58106
__rtruediv__ #57997
__setattr__
__setitem__
__sizeof__
__str__
__sub__ #58106
__subclasshook__
__truediv__ #57997
__weakref__
__xor__ #58343
_apply
apply
_bump_inplace_version
_clear
_clear_data
_clear_dataptr
_cloneVar
_concatVar
_copy_gradient_from
_copy_to
_detectContinuesSlice
_detectEllipsis
_get_info
_get_tensor_from_selected_rows
_getitem_dygraph
_getitem_from_offset
_grad_ivar
_grad_name
_grad_value
_has_attr
_inplace_assign
_inplace_version
_is_dense_tensor_hold_allocation
_is_initialized
_is_shared_buffer_with
_is_shared_underline_tensor_with
_local_shape
_local_value
_md5sum
_numel
_offset
_place_str
_placements_str
_reconstructSliceinfo
_register_backward_hook
_register_grad_hook
_remove_attr
_remove_grad_hook
_reset_grad_inplace_version
_set_attr
_set_error_clip
_set_grad_ivar
_set_grad_type
_set_impl
_set_info
_setitem_dygraph
_share_buffer_to
_share_memory
_share_underline_tensor_to
_slice
_sliceAndConcatVar
_sliceVar
_slice_indices
_tensor_use_gpudnn
_to
_to_readable_code
_to_static_var
_unset_fake_empty
_update_desc_attr
_use_gpudnn
_uva
_zero_grads
abs #57857
abs_ #57857
acos #57857
acos_ #57857
acosh #57857
acosh_ #57857
add #57857
add_ #57857
add_n #57857
addmm #57857
addmm_ #57857
all #57857
all_used_ops
allclose #57857
amax #57857
amin #57857
angle #57857
any #57857
append #59220
apply
apply_
argmax #57857
argmin #57857
argsort #57857
as_complex #57857
as_real #57857
as_strided #57857
asin #57857
asin_ #57857
asinh #57857
asinh_ #57857
astype #58026
atan #57857
atan2 #57857
atan_ #57857
atanh #57857
atanh_ #57857
atleast_1d #57857
atleast_2d #57857
atleast_3d #57857
attr
attr_names
backward
bincount #57857
bitwise_and #57857
bitwise_and_ #57857
bitwise_left_shift #57857
bitwise_left_shift_ #57857
bitwise_not #57857
bitwise_not_ #57857
bitwise_or #57857
bitwise_or_ #57857
bitwise_right_shift #57857
bitwise_right_shift_ #57857
bitwise_xor #57857
bitwise_xor_ #57857
block
bmm #57857
broadcast_shape #57857
broadcast_tensors #57857
broadcast_to #57857
bucketize #57857
cast #57857
cast_ #57857
cauchy_ #57857
cdist #57857
ceil #57857
ceil_ #57857
cholesky #57857
cholesky_solve #57857
chunk #57857
clear_grad
clear_gradient
clip #57857
clip_ #57857
clone #59115
coalesce
cols
combinations #57857
concat #57857
cond #57857
conj #57857
contiguous
copy_
copysign #57857
copysign_ #57857
corrcoef #57857
cos #57857
cos_ #57857
cosh #57857
cosh_ #57857
count_nonzero #57857
cov #57857
cpu #59300
create_parameter #57857
create_tensor #57857
cross #57857
crows
cuda #59300
cummax #57857
cummin #57857
cumprod #57857
cumprod_ #57857
cumsum #57857
cumsum_ #57857
cumulative_trapezoid #57857
data
data_ptr
deg2rad #57857
detach
detach_
diag #57857
diag_embed #57857
diagflat #57857
diagonal #57857
diagonal_scatter #57857
diff #57857
digamma #57857
digamma_ #57857
dim #58042
dist #57857
dist_attr
divide #57857
divide_ #57857
dot #57857
dsplit #57857
dtype
eig #57857
eigvals #57857
eigvalsh #57857
element_size
equal #57857
equal_ #57857
equal_all #57857
erf #57857
erfinv #57857
erfinv_ #57857
exp #57857
exp_ #57857
expand #57857
expand_as #57857
expm1 #57857
exponential_ #57857
fill_
fill_diagonal_
fill_diagonal_tensor
fill_diagonal_tensor_
first_use
flatten #57857
flatten_ #57857
flip #57857
floor #57857
floor_ #57857
floor_divide #57857
floor_divide_ #57857
floor_mod #57857
floor_mod_ #57857
fmax #57857
fmin #57857
frac #57857
frac_ #57857
frexp #57857
gammaln #57857
gammaln_ #57857
gather #57857
gather_nd #57857
gcd #57857
gcd_ #57857
geometric_ #57857
get_defining_op
get_map_tensor
get_selected_rows
get_strides
get_tensor
get_value
grad
grad_
grad_fn
grad_name
gradient
greater_equal #57857
greater_equal_ #57857
greater_than #57857
greater_than_ #57857
has_one_use
hash
heaviside #57857
histogram #57857
histogramdd #57857
householder_product #57857
hsplit #57857
hypot #57857
hypot_ #57857
i0 #57857
i0_ #57857
i0e #57857
i1 #57857
i1e #57857
id
imag #57857
increment #57857
index_add #57857
index_add_ #57857
index_fill #57857
index_fill_ #57857
index_put #57857
index_put_ #57857
index_sample #57857
index_select #57857
indices
initialized
inner #57857
inplace_version
inverse #57857
is_complex #57857
is_contiguous
is_dense
is_dense_tensor_array_type
is_dense_tensor_type
is_dist
is_empty #57857
is_floating_point #57857
is_integer #57857
is_leaf
is_parameter
is_same
is_same_shape
is_selected_row_type
is_selected_rows
is_sparse
is_sparse_coo
is_sparse_csr
is_tensor #57857
is_tensorarray
isclose #57857
isfinite #57857
isinf #57857
isnan #57857
istft #57857
item #58042
kron #57857
kthvalue #57857
layout
lcm #57857
lcm_ #57857
ldexp #57857
ldexp_ #57857
lerp #57857
lerp_ #57857
less_equal #57857
less_equal_ #57857
less_than #57857
less_than_ #57857
lgamma #57857
lgamma_ #57857
lod_level
log #57857
log10 #57857
log10_ #57857
log1p #57857
log1p_ #57857
log2 #57857
log2_ #57857
log_ #57857
logaddexp #57857
logcumsumexp #57857
logical_and #57857
logical_and_ #57857
logical_not #57857
logical_not_ #57857
logical_or #57857
logical_or_ #57857
logical_xor #57857
logical_xor_ #57857
logit #57857
logit_ #57857
logsumexp #57857
lstsq #57857
lu #57857
lu_unpack #57857
masked_fill #57857
masked_fill_ #57857
masked_scatter #57857
masked_scatter_ #57857
masked_select #57857
matmul #57857
matrix_power #57857
max #57857
maximum #57857
mean #57857
median #57857
min #57857
minimum #57857
mm #57857
mod #57857
mod_ #57857
mode #57857
moveaxis #57857
multi_dot #57857
multigammaln #57857
multigammaln_ #57857
multinomial #57857
multiplex #57857
multiply #57857
multiply_ #57857
mv #57857
name
nan_to_num #57857
nan_to_num_ #57857
nanmean #57857
nanmedian #57857
nanquantile #57857
nansum #57857
ndim #58042
ndimension #58042
neg #57857
neg_ #57857
nextafter #57857
nnz
nonzero #57857
norm #57857
normal_ #57857
not_equal #57857
not_equal_ #57857
num_shard
numel #57857
numpy
offset
outer #57857
pca_lowrank #57857
persistable
pin_memory
pinv #57857
place #58042
placements
polar #57857
polygamma #57857
polygamma_ #57857
pop
pow #57857
pow_ #57857
process_mesh
prod #57857
put_along_axis #57857
put_along_axis_ #57857
qr #57857
quantile #57857
rad2deg #57857
rank #57857
real #57857
reciprocal #57857
reciprocal_ #57857
reconstruct_from_
register_hook
remainder #57857
remainder_ #57857
renorm #57857
renorm_ #57857
repeat_interleave #57857
replace_all_uses_with
reshape #57857
reshape_ #57857
retain_grads
reverse #57857
roll #57857
rot90 #57857
round #57857
round_ #57857
rows
rsqrt #57857
rsqrt_ #57857
scale #57857
scale_ #57857
scatter #57857
scatter_ #57857
scatter_nd #57857
scatter_nd_add #57857
select_scatter #57857
set_shape
set_string_list
set_type
set_value
set_vocab
sgn #57857
shape #57857
shard_index #57857
sigmoid #57857
sigmoid_ #57857
sign #57857
signbit #57857
sin #57857
sin_ #57857
sinh #57857
sinh_ #57857
size
slice #57857
slice_scatter #57857
solve #57857
sort #57857
split #57857
sqrt #57857
sqrt_ #57857
square #57857
squeeze #57857
squeeze_ #57857
stack #57857
stanh #57857
std #57857
stft #57857
stop_gradient
strided_slice #57857
strides
subtract #57857
subtract_ #57857
sum #57857
t #57857
t_ #57857
take #57857
take_along_axis #57857
tan #57857
tan_ #57857
tanh #57857
tanh_ #57857
tensor_split #57857
tensordot #57857
tile #57857
to
to_dense
to_sparse_coo
to_sparse_csr
to_string
tolist
top_p_sampling #57857
topk #57857
trace #57857
transpose #57857
transpose_ #57857
trapezoid #57857
tril #57857
tril_ #57857
triu #57857
triu_ #57857
trunc #57857
trunc_ #57857
type
unbind #57857
unflatten #57857
unfold #57857
uniform_ #57857
unique #57857
unique_consecutive #57857
unsqueeze #57857
unsqueeze_ #57857
unstack #57857
use_empty
value
values
vander #57857
var #57857
view #57857
view_as #57857
vsplit #57857
where #57857
where_ #57857
zero_
gouzil commented Oct 16, 2023

For the record, here is the code used to generate the table:

import paddle

output_text = "\n> [!NOTE]\n> 请不要手动修改表格\n\n"
output_text += f"用于统计现阶段支持情况\n\n基于: {paddle.version.commit}"
all_dir = list(set(dir(paddle.Tensor) + dir(paddle.base.framework.Variable) + dir(paddle.base.libpaddle.pir.OpResult)))
all_dir.sort()
output_text += """
|         math          |                        动态图Tensor                         |                      老IR Variable                      |                      新IR OpResult                      |
|:---------------------:|:--------------------------------------------------------:|:------------------------------------------------------:|:------------------------------------------------------:|
"""


print(len(all_dir))
for i in all_dir:
    # Backtick-quote dunder names so Markdown does not render the double underscores as emphasis.
    math_name = f"`{i}`" if "__" in i else i
    temp_line = f"| {math_name} "
    if i in dir(paddle.Tensor):
        temp_line += "| ✅ "
    else:
        temp_line += "| ❌ "

    if i in dir(paddle.base.framework.Variable):
        temp_line += "| ✅ "
    else:
        temp_line += "| ❌ "

    if i in dir(paddle.base.libpaddle.pir.OpResult):
        # Rules mapping each API to the PR that added its new IR OpResult support.
        if i in paddle.tensor.tensor_method_func:
            temp_line += "| #57857 |"
        else:
            match i:
                case "place" | "ndimension" | "dim" | "ndim" | "item":
                    temp_line += "| #58042 |"
                case "astype":
                    temp_line += "| #58026 |"
                case "__div__" | "__truediv__" | "__rdiv__" | "__rtruediv__":
                    temp_line += "| #57997 |"
                case "__rmul__" | "__mul__" | "__rsub__" | "__sub__" | "__radd__" | "__add__":
                    temp_line += "| #58106 |"
                case "__pow__" | "__rpow__" | "__floordiv__" | "__mod__" | "__matmul__":
                    temp_line += "| #58219 |"
                case "__ne__" | "__lt__" | "__le__" | "__gt__" | "__ge__" | "__and__" | "__or__" | "__xor__" | "__invert__":
                    temp_line += "| #58343 |"
                case "clone":
                    temp_line += "| #59115 |"
                case "append":
                    temp_line += "| #59220 |"
                case "cpu" | "cuda":
                    temp_line += "| #59300 |"
                case "__neg__":
                    temp_line += "| #60166 |"
                case "__eq__":
                    temp_line += "| #58896 |"
                case "T":
                    temp_line += "| #60515 |"
                case _:
                    temp_line += "| ✅ |"
    else:
        temp_line += "| ❌ |"

    output_text += temp_line + "\n"

with open("./method_compare.md", "w", encoding="utf-8") as file:
    file.write(output_text)
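
Running the script writes the table to method_compare.md in the current working directory. Note that the match/case dispatch requires Python 3.10 or newer, and the API-to-PR mapping is hard-coded, so the match block has to be extended by hand whenever a new support PR lands.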
