diff --git a/.github/workflows/scripts/ti_build/alter.py b/.github/workflows/scripts/ti_build/alter.py index cb599bf7ff39c..a1a165ff00d4e 100644 --- a/.github/workflows/scripts/ti_build/alter.py +++ b/.github/workflows/scripts/ti_build/alter.py @@ -33,11 +33,15 @@ def add_aot_env(): def _write_ti_bashrc(): path = get_cache_home() / "ti.bashrc" + envs = get_cache_home() / "ti-env.sh" + _write_env(envs) with open(path, "w") as f: f.write( "[ -f /etc/bashrc ] && source /etc/bashrc\n" "[ -f ~/.bashrc ] && source ~/.bashrc\n" r'export PS1="\[\e]0;[Taichi Build Environment]\a\]\[\033[01;31m\][Taichi Build] \[\033[00m\]$PS1"' + "\n" + f"source {envs}\n" ) return path @@ -47,11 +51,15 @@ def _write_ti_zshrc(): dotdir = get_cache_home() / "zdotdir" dotdir.mkdir(parents=True, exist_ok=True) path = dotdir / ".zshrc" + envs = get_cache_home() / "ti-env.sh" + _write_env(envs) with open(path, "w") as f: f.write( "[ -f /etc/zsh/zshrc ] && source /etc/zsh/zshrc\n" "[ -f $HOME/.zshrc ] && source $HOME/.zshrc\n" r"export PROMPT='%{$fg_bold[red]%}[Taichi Build] %{$reset_color%}'$PROMPT" + "\n" + f"source {envs}\n" ) return dotdir @@ -138,10 +146,13 @@ def enter_shell(): os.execl(shell.exe, shell.exe) -def write_env(path): - cmake_args.writeback() +def _write_env(path): envs = os.environ.get_changed_envs() envstr = "" + + if isinstance(path, Path): + path = str(path) + if path.endswith(".ps1"): envstr = "\n".join([f'$env:{k}="{v}"' for k, v in envs.items()]) elif path.endswith(".sh"): @@ -156,6 +167,10 @@ def write_env(path): with open(path, "w") as f: f.write(envstr) + +def write_env(path): + cmake_args.writeback() + _write_env(path) misc.info(f"Environment variables written to {path}") diff --git a/c_api/docs/taichi/taichi_core.h.md b/c_api/docs/taichi/taichi_core.h.md index 8c8263562db87..54890de2738a8 100644 --- a/c_api/docs/taichi/taichi_core.h.md +++ b/c_api/docs/taichi/taichi_core.h.md @@ -305,6 +305,7 @@ Types of kernel and compute graph argument. - `enumeration.argument_type.ndarray`: ND-array wrapped around a `handle.memory`. - `enumeration.argument_type.texture`: Texture wrapped around a `handle.image`. - `enumeration.argument_type.scalar`: Typed scalar. +- `enumeration.argument_type.tensor`: Typed tensor. `bit_field.memory_usage` @@ -450,6 +451,23 @@ Scalar value represented by a power-of-two number of bits. A typed scalar value. +`union.tensor_value` + +Tensor value represented by a power-of-two number of bits. + +- `union.tensor_value.x8`: Tensor value that fits into 8 bits. +- `union.tensor_value.x16`: Tensor value that fits into 16 bits. +- `union.tensor_value.x32`: Tensor value that fits into 32 bits. +- `union.tensor_value.x64`: Tensor value that fits into 64 bits. + +`structure.tensor_value_with_length` + +A tensor value with a length. + +`structure.tensor` + +A typed tensor value. + `union.argument_value` A scalar or structured argument value. @@ -459,6 +477,7 @@ A scalar or structured argument value. - `union.argument_value.ndarray`: An ND-array to be bound. - `union.argument_value.texture`: A texture to be bound. - `union.argument_value.scalar`: An scalar to be bound. +- `union.argument_value.tensor`: A tensor to be bound. 
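
The `x8`/`x16`/`x32`/`x64` variants of `union.tensor_value` are lanes over one fixed-size payload, so the maximum element count of a tensor argument depends on the element width. A minimal sketch of that invariant, assuming only the array sizes this patch declares in `taichi_core.h` (`x8[128]`, `x16[64]`, `x32[32]`, `x64[16]`):

```cpp
// Sketch: every lane of TiTensorValue spans the same 128-byte payload,
// so a tensor argument holds at most 128 / sizeof(element) values.
#include <taichi/taichi_core.h>

static_assert(sizeof(TiTensorValue) == 128,
              "all lanes share one 128-byte buffer");
static_assert(sizeof(TiTensorValue::x16) == sizeof(TiTensorValue::x64),
              "the 16-bit and 64-bit lanes overlap exactly");
```
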
`structure.argument` diff --git a/c_api/include/taichi/cpp/taichi.hpp b/c_api/include/taichi/cpp/taichi.hpp index 61f0b15e65290..8319f96493854 100644 --- a/c_api/include/taichi/cpp/taichi.hpp +++ b/c_api/include/taichi/cpp/taichi.hpp @@ -837,6 +837,25 @@ class ComputeGraph { return compute_graph_; } }; +template +struct DataTypeToEnum { + static constexpr TiDataType value = TI_DATA_TYPE_UNKNOWN; +}; +#define DEFINE_DATA_TYPE_ENUM(type, enumv) \ + template <> \ + struct DataTypeToEnum { \ + static constexpr TiDataType value = TI_DATA_TYPE_##enumv; \ + }; + +DEFINE_DATA_TYPE_ENUM(int32_t, I32); +DEFINE_DATA_TYPE_ENUM(float, F32); +DEFINE_DATA_TYPE_ENUM(uint16_t, U16); +DEFINE_DATA_TYPE_ENUM(int16_t, I16); +DEFINE_DATA_TYPE_ENUM(uint8_t, U8); +DEFINE_DATA_TYPE_ENUM(int8_t, I8); +DEFINE_DATA_TYPE_ENUM(uint64_t, U64); +DEFINE_DATA_TYPE_ENUM(int64_t, I64); +#undef DEFINE_DATA_TYPE_ENUM class Kernel { protected: @@ -884,11 +903,12 @@ class Kernel { template void push_arg(const std::vector &v) { int idx = args_.size(); - // Temporary workaround for setting vec/matrix arguments in a flattened way. - args_.resize(args_.size() + v.size()); - for (int j = 0; j < v.size(); ++j) { - at(idx + j) = v[j]; - } + args_.resize(idx + 1); + args_[idx].type = TI_ARGUMENT_TYPE_TENSOR; + std::memcpy(args_[idx].value.tensor.contents.data.x32, v.data(), + v.size() * sizeof(T)); + args_[idx].value.tensor.contents.length = v.size(); + args_[idx].value.tensor.type = DataTypeToEnum::value; } template diff --git a/c_api/include/taichi/taichi_core.h b/c_api/include/taichi/taichi_core.h index bae403a6ff64b..f3764822c35b0 100644 --- a/c_api/include/taichi/taichi_core.h +++ b/c_api/include/taichi/taichi_core.h @@ -227,7 +227,7 @@ #pragma once #ifndef TI_C_API_VERSION -#define TI_C_API_VERSION 1005000 +#define TI_C_API_VERSION 1007000 #endif // TI_C_API_VERSION #ifndef TAICHI_H @@ -463,6 +463,8 @@ typedef enum TiArgumentType { TI_ARGUMENT_TYPE_TEXTURE = 3, // Typed scalar. TI_ARGUMENT_TYPE_SCALAR = 4, + // Typed tensor. + TI_ARGUMENT_TYPE_TENSOR = 5, TI_ARGUMENT_TYPE_MAX_ENUM = 0xffffffff, } TiArgumentType; @@ -802,6 +804,36 @@ typedef struct TiScalar { TiScalarValue value; } TiScalar; +// Union `TiTensorValue` +// +// Tensor value represented by a power-of-two number of bits. +typedef union TiTensorValue { + // Tensor value that fits into 8 bits. + uint8_t x8[128]; + // Tensor value that fits into 16 bits. + uint16_t x16[64]; + // Tensor value that fits into 32 bits. + uint32_t x32[32]; + // Tensor value that fits into 64 bits. + uint64_t x64[16]; +} TiTensorValue; + +// Structure `TiTensorValueWithLength` +// +// A tensor value with a length. +typedef struct TiTensorValueWithLength { + uint32_t length; + TiTensorValue data; +} TiTensorValueWithLength; + +// Structure `TiTensor` +// +// A typed tensor value. +typedef struct TiTensor { + TiDataType type; + TiTensorValueWithLength contents; +} TiTensor; + // Union `TiArgumentValue` (1.4.0) // // A scalar or structured argument value. @@ -818,6 +850,8 @@ typedef union TiArgumentValue { TiTexture texture; // An scalar to be bound. TiScalar scalar; + // A tensor to be bound. 
+ TiTensor tensor; } TiArgumentValue; // Structure `TiArgument` (1.4.0) diff --git a/c_api/src/taichi_core_impl.cpp b/c_api/src/taichi_core_impl.cpp index fa91a5da9ec71..d8366bf30b067 100644 --- a/c_api/src/taichi_core_impl.cpp +++ b/c_api/src/taichi_core_impl.cpp @@ -792,6 +792,28 @@ void ti_launch_kernel(TiRuntime runtime, devallocs.emplace_back(std::move(devalloc)); break; } + case TI_ARGUMENT_TYPE_TENSOR: { + auto &tensor = arg.value.tensor; + if (tensor.type == TI_DATA_TYPE_I16 || + tensor.type == TI_DATA_TYPE_U16 || + tensor.type == TI_DATA_TYPE_F16) { + for (int j = 0; j < tensor.contents.length; j++) { + builder.set_struct_arg_impl({(int)i, j}, + tensor.contents.data.x16[j]); + } + } else if (tensor.type == TI_DATA_TYPE_I32 || + tensor.type == TI_DATA_TYPE_U32 || + tensor.type == TI_DATA_TYPE_F32) { + for (int j = 0; j < tensor.contents.length; j++) { + builder.set_struct_arg_impl({(int)i, j}, + tensor.contents.data.x32[j]); + } + } else { + ti_set_last_error(TI_ERROR_NOT_SUPPORTED, + ("args[" + std::to_string(i) + "].type").c_str()); + } + break; + } default: { ti_set_last_error(TI_ERROR_ARGUMENT_OUT_OF_RANGE, ("args[" + std::to_string(i) + "].type").c_str()); diff --git a/c_api/taichi.json b/c_api/taichi.json index 7971c3327b84c..f726a5cea9b73 100644 --- a/c_api/taichi.json +++ b/c_api/taichi.json @@ -156,7 +156,8 @@ "f32": 1, "ndarray": 2, "texture": 3, - "scalar": 4 + "scalar": 4, + "tensor": 5 } }, { @@ -484,6 +485,60 @@ } ] }, + { + "name": "tensor_value", + "type": "union", + "variants": [ + { + "name": "x8", + "type": "uint8_t", + "count": 128 + }, + { + "name": "x16", + "type": "uint16_t", + "count": 64 + }, + { + "name": "x32", + "type": "uint32_t", + "count": 32 + }, + { + "name": "x64", + "type": "uint64_t", + "count": 16 + } + ] + }, + { + "name": "tensor_value_with_length", + "type": "structure", + "fields": [ + { + "name": "length", + "type": "uint32_t" + }, + { + "name": "data", + "type": "union.tensor_value" + } + ] + }, + { + "name": "tensor", + "type": "structure", + "fields": [ + { + "name": "type", + "type": "enumeration.data_type" + }, + { + "name": "contents", + "type": "structure.tensor_value_with_length" + } + ] + }, { "name": "argument_value", "type": "union", @@ -508,6 +563,10 @@ { "name": "scalar", "type": "structure.scalar" + }, + { + "name": "tensor", + "type": "structure.tensor" } ] }, diff --git a/docs/lang/articles/c-api/taichi_core.md b/docs/lang/articles/c-api/taichi_core.md index 4f3b4c1facc91..9892305358a6b 100644 --- a/docs/lang/articles/c-api/taichi_core.md +++ b/docs/lang/articles/c-api/taichi_core.md @@ -498,6 +498,7 @@ typedef enum TiArgumentType { TI_ARGUMENT_TYPE_NDARRAY = 2, TI_ARGUMENT_TYPE_TEXTURE = 3, TI_ARGUMENT_TYPE_SCALAR = 4, + TI_ARGUMENT_TYPE_TENSOR = 5, TI_ARGUMENT_TYPE_MAX_ENUM = 0xffffffff, } TiArgumentType; ``` @@ -509,6 +510,7 @@ Types of kernel and compute graph argument. - `TI_ARGUMENT_TYPE_NDARRAY`: ND-array wrapped around a [`TiMemory`](#handle-timemory). - `TI_ARGUMENT_TYPE_TEXTURE`: Texture wrapped around a [`TiImage`](#handle-tiimage). - `TI_ARGUMENT_TYPE_SCALAR`: Typed scalar. +- `TI_ARGUMENT_TYPE_TENSOR`: Typed tensor. --- @@ -927,6 +929,7 @@ typedef union TiArgumentValue { TiNdArray ndarray; TiTexture texture; TiScalar scalar; + TiTensor tensor; } TiArgumentValue; ``` @@ -937,6 +940,7 @@ A scalar or structured argument value. - `ndarray`: An ND-array to be bound. - `texture`: A texture to be bound. - `scalar`: An scalar to be bound. +- `tensor`: A tensor to be bound. 
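
For context on how the new fields fit together, a hedged usage sketch (not part of this diff): it assumes a valid `TiRuntime` and `TiKernel` already exist and that `ti_launch_kernel` keeps its existing `(runtime, kernel, arg_count, args)` signature.

```cpp
// Sketch: bind a 3-element i32 vector as a TI_ARGUMENT_TYPE_TENSOR argument.
#include <taichi/taichi_core.h>
#include <cstring>

void launch_with_vec3(TiRuntime runtime, TiKernel kernel) {
  const int32_t vec[3] = {1, 2, 3};

  TiArgument arg{};
  arg.type = TI_ARGUMENT_TYPE_TENSOR;        // new argument type in this diff
  arg.value.tensor.type = TI_DATA_TYPE_I32;  // element type tag
  arg.value.tensor.contents.length = 3;      // element count, not bytes
  std::memcpy(arg.value.tensor.contents.data.x32, vec, sizeof(vec));

  ti_launch_kernel(runtime, kernel, /*arg_count=*/1, &arg);
}
```

This mirrors what the C++ wrapper's `push_arg` does in `taichi.hpp` above: it copies the flattened vector into the appropriate lane of the value union and records the element count and data type, and `ti_launch_kernel` then forwards each element through `set_struct_arg_impl`.
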
--- ### Structure `TiArgument` diff --git a/misc/make_changelog.py b/misc/make_changelog.py index f41ff81269449..a7e23e593ece4 100644 --- a/misc/make_changelog.py +++ b/misc/make_changelog.py @@ -34,16 +34,20 @@ def main(ver=None, repo_dir="."): # We need to find out the latest common commit among base and ver, # everything after this commit should be listed in the changelog. - base_commit = find_latest_tag_commit(g.tags) - commits_in_base_tag = list(g.iter_commits(base_commit, max_count=500)) - commits = list(g.iter_commits(ver, max_count=500)) - begin, end = -1, 0 + latest_release = find_latest_tag_commit(g.tags) + head = g.head.commit + mb = g.merge_base(latest_release, head) + assert len(mb) == 1 + mb = mb[0] + commits_in_base_tag = list(g.iter_commits(latest_release, max_count=500)) + commits = list(g.iter_commits((mb, head))) def format(c): return f"{c.summary} (by **{c.author}**)" notable_changes = {} all_changes = [] + by_author = {} details = load_pr_tags() @@ -75,6 +79,7 @@ def format(c): f'** Warning: tag {tag.lower()} undefined in the "details" dict. Please include the tag into "details", unless the tag is a typo.' ) all_changes.append(format(c)) + by_author.setdefault(str(c.author), []).append(s) res = "Highlights:\n" for tag in sorted(notable_changes.keys()): @@ -86,6 +91,13 @@ def format(c): for c in all_changes: res += f" - {c}\n" + if args.show_per_author: + res += "\nContributors (in alphabetical order):\n" + for author in sorted(by_author.keys()): + res += f" - {author}\n" + for item in by_author[author]: + res += f" - {item}\n" + return res @@ -93,6 +105,7 @@ def format(c): parser = argparse.ArgumentParser() parser.add_argument("--ver") parser.add_argument("--repo_dir", type=str, default=".") + parser.add_argument("--show-per-author", action="store_true", default=False) parser.add_argument("--save", action="store_true", default=False) args = parser.parse_args() res = main(args.ver, args.repo_dir) diff --git a/python/taichi/aot/utils.py b/python/taichi/aot/utils.py index f85f132a8d12b..a29bda91fd43a 100644 --- a/python/taichi/aot/utils.py +++ b/python/taichi/aot/utils.py @@ -97,11 +97,8 @@ def produce_injected_args(kernel, symbolic_args=None): texture_shape = (2,) * anno.num_dimensions injected_args.append(Texture(Format.rgba8, texture_shape)) elif isinstance(anno, MatrixType): - if not isinstance(symbolic_args[i], list): - raise RuntimeError("Expected a symbolic arg with Matrix type.") - - symbolic_mat_n = len(symbolic_args[i]) - symbolic_mat_m = len(symbolic_args[i][0]) + symbolic_mat_n = symbolic_args[i].element_shape[0] + symbolic_mat_m = symbolic_args[i].element_shape[1] if symbolic_mat_m != anno.m or symbolic_mat_n != anno.n: raise RuntimeError( diff --git a/python/taichi/graph/_graph.py b/python/taichi/graph/_graph.py index bcdef533e392a..f33ceb5e526a2 100644 --- a/python/taichi/graph/_graph.py +++ b/python/taichi/graph/_graph.py @@ -235,15 +235,7 @@ def _make_arg_matrix(kwargs: Dict[str, Any]): dtype = kwargs["dtype"] if not isinstance(dtype, MatrixType): raise TaichiRuntimeError(f"Tag ArgKind.MATRIX must specify matrix type, but got {dtype}.") - arg_list = [] - i = 0 - for _ in range(dtype.n): - arg_sublist = [] - for _ in range(dtype.m): - arg_sublist.append(_ti_core.Arg(ArgKind.MATRIX, f"{name}_mat_arg_{i}", dtype.dtype, 0, [])) - i += 1 - arg_list.append(arg_sublist) - return arg_list + return _ti_core.Arg(ArgKind.MATRIX, f"{name}_mat_arg", dtype.dtype, 0, [dtype.n, dtype.m]) def _make_arg_texture(kwargs: Dict[str, Any]): diff --git 
a/python/taichi/lang/ast/ast_transformer.py b/python/taichi/lang/ast/ast_transformer.py index fdca71be4cc2e..99140bd0605ef 100644 --- a/python/taichi/lang/ast/ast_transformer.py +++ b/python/taichi/lang/ast/ast_transformer.py @@ -434,17 +434,6 @@ def build_call_if_is_builtin(ctx, node, args, keywords): if id(func) in replace_func: node.ptr = replace_func[id(func)](*args, **keywords) - if func is min or func is max: - name = "min" if func is min else "max" - warnings.warn_explicit( - f'Calling builtin function "{name}" in Taichi scope is deprecated, ' - f"and it will be removed in Taichi v1.6.0." - f'Please use "ti.{name}" instead.', - DeprecationWarning, - ctx.file, - node.lineno + ctx.lineno_offset, - module="taichi", - ) return True return False diff --git a/python/taichi/lang/kernel_arguments.py b/python/taichi/lang/kernel_arguments.py index 1f1089090308a..c15b8555a8f34 100644 --- a/python/taichi/lang/kernel_arguments.py +++ b/python/taichi/lang/kernel_arguments.py @@ -7,7 +7,7 @@ from taichi.lang.any_array import AnyArray from taichi.lang.enums import Layout from taichi.lang.expr import Expr -from taichi.lang.matrix import MatrixType, VectorType, make_matrix +from taichi.lang.matrix import MatrixType from taichi.lang.struct import StructType from taichi.lang.util import cook_dtype from taichi.types.primitive_types import RefType, u64 @@ -82,14 +82,10 @@ def get_type_for_kernel_args(dtype, name): def decl_matrix_arg(matrixtype, name): - if isinstance(matrixtype, VectorType): - return make_matrix([decl_scalar_arg(matrixtype.dtype, f"{name}_{i}") for i in range(matrixtype.n)]) - return make_matrix( - [ - [decl_scalar_arg(matrixtype.dtype, f"{name}_{i}_{j}") for i in range(matrixtype.m)] - for j in range(matrixtype.n) - ] - ) + arg_type = get_type_for_kernel_args(matrixtype, name) + arg_id = impl.get_runtime().compiling_callable.insert_scalar_param(arg_type, name) + arg_load = Expr(_ti_core.make_arg_load_expr(arg_id, arg_type, create_load=False)) + return matrixtype.from_taichi_object(arg_load) def decl_struct_arg(structtype, name): diff --git a/python/taichi/lang/kernel_impl.py b/python/taichi/lang/kernel_impl.py index 226f8ef9ae5c8..6467174deb64f 100644 --- a/python/taichi/lang/kernel_impl.py +++ b/python/taichi/lang/kernel_impl.py @@ -757,39 +757,33 @@ def call_back(): ) else: raise TaichiRuntimeTypeError.get(i, needed.to_string(), v) - else: raise TaichiRuntimeTypeError.get(i, needed.to_string(), v) elif isinstance(needed, MatrixType): if needed.dtype in primitive_types.real_types: - for a in range(needed.n): - for b in range(needed.m): - if actual_argument_slot >= max_arg_num: - exceed_max_arg_num = True - break - val = v[a, b] if needed.ndim == 2 else v[a] - if not isinstance(val, (int, float, np.integer, np.floating)): - raise TaichiRuntimeTypeError.get(i, needed.dtype.to_string(), type(val)) - launch_ctx.set_arg_float(actual_argument_slot, float(val)) - actual_argument_slot += 1 + + def cast_func(x): + if not isinstance(x, (int, float, np.integer, np.floating)): + raise TaichiRuntimeTypeError.get(i, needed.dtype.to_string(), type(x)) + return float(x) + elif needed.dtype in primitive_types.integer_types: - for a in range(needed.n): - for b in range(needed.m): - if actual_argument_slot >= max_arg_num: - exceed_max_arg_num = True - break - val = v[a, b] if needed.ndim == 2 else v[a] - if not isinstance(val, (int, np.integer)): - raise TaichiRuntimeTypeError.get(i, needed.dtype.to_string(), type(val)) - if is_signed(needed.dtype): - launch_ctx.set_arg_int(actual_argument_slot, 
int(val)) - else: - launch_ctx.set_arg_uint(actual_argument_slot, int(val)) - actual_argument_slot += 1 + + def cast_func(x): + if not isinstance(x, (int, np.integer)): + raise TaichiRuntimeTypeError.get(i, needed.dtype.to_string(), type(x)) + return int(x) + else: raise ValueError(f"Matrix dtype {needed.dtype} is not integer type or real type.") - continue + + if needed.ndim == 2: + v = [cast_func(v[i, j]) for i in range(needed.n) for j in range(needed.m)] + else: + v = [cast_func(v[i]) for i in range(needed.n)] + v = needed(*v) + needed.set_kernel_struct_args(v, launch_ctx, (actual_argument_slot,)) elif isinstance(needed, StructType): needed.set_kernel_struct_args(v, launch_ctx, (actual_argument_slot,)) else: diff --git a/python/taichi/math/mathimpl.py b/python/taichi/math/mathimpl.py index f445e4fd556a0..947cea1fb71e9 100644 --- a/python/taichi/math/mathimpl.py +++ b/python/taichi/math/mathimpl.py @@ -2,7 +2,7 @@ """ Math functions for glsl-like functions and other stuff. """ -from math import e, inf, nan, pi +import math from taichi.lang import impl, ops from taichi.lang.impl import static, zero @@ -31,6 +31,26 @@ cfg = impl.default_cfg +e = math.e +"""The mathematical constant e = 2.718281…. +Directly imported from the Python standard library `math`. +""" + +pi = math.pi +"""The mathematical constant π = 3.141592…. +Directly imported from the Python standard library `math`. +""" + +inf = math.inf +"""A floating-point positive infinity. (For negative infinity, use `-inf`). +Directly imported from the Python standard library `math`. +""" + +nan = math.nan +"""A floating-point "not a number" (NaN) value. +Directly imported from the Python standard library `math` +""" + vec2 = vector(2, cfg().default_fp) """2D floating vector type. """ @@ -84,7 +104,7 @@ def mix(x, y, a): """Performs a linear interpolation between `x` and `y` using `a` to weight between them. The return value is computed as - :math:`x\times a + (1-a)\times y`. + `x * (1 - a) + a * y`. The arguments can be scalars or :class:`~taichi.Matrix`, as long as the operation can be performed. 
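
The corrected `mix` docstring formula above matches the conventional GLSL definition of linear interpolation. A minimal reference sketch of the formula itself, for illustration only (this is not Taichi's implementation):

```cpp
// mix(x, y, a): returns x when a == 0, y when a == 1,
// and blends linearly in between.
float mix(float x, float y, float a) {
  return x * (1.0f - a) + a * y;
}
```
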
diff --git a/taichi/aot/graph_data.cpp b/taichi/aot/graph_data.cpp index 2423f864e3cc9..caa03f5e2fb97 100644 --- a/taichi/aot/graph_data.cpp +++ b/taichi/aot/graph_data.cpp @@ -44,6 +44,41 @@ void CompiledGraph::init_runtime_context( LaunchContextBuilder &ctx) { for (int i = 0; i < paramter_list.size(); ++i) { auto &symbolic_arg = paramter_list[i]; + if (symbolic_arg.tag == aot::ArgKind::kMatrix) { + int size = symbolic_arg.element_shape[0] * symbolic_arg.element_shape[1]; + for (int j = 0; j < size; j++) { + auto found = args.find(symbolic_arg.name + "_" + std::to_string(j)); + TI_ERROR_IF(found == args.end(), "Missing runtime value for {}", + symbolic_arg.name); + const aot::IValue &ival = found->second; + TI_ASSERT(ival.tag == aot::ArgKind::kScalar); + int type_size = data_type_size(symbolic_arg.dtype()); + switch (type_size) { + case 1: + ctx.set_struct_arg_impl( + {i, j}, taichi_union_cast_with_different_sizes(ival.val)); + break; + case 2: + ctx.set_struct_arg_impl( + {i, j}, + taichi_union_cast_with_different_sizes(ival.val)); + break; + case 4: + ctx.set_struct_arg_impl( + {i, j}, + taichi_union_cast_with_different_sizes(ival.val)); + break; + case 8: + ctx.set_struct_arg_impl( + {i, j}, + taichi_union_cast_with_different_sizes(ival.val)); + break; + default: + TI_ERROR("Unsupported type size {}", type_size); + } + } + continue; + } auto found = args.find(symbolic_arg.name); TI_ERROR_IF(found == args.end(), "Missing runtime value for {}", symbolic_arg.name); @@ -89,8 +124,7 @@ void CompiledGraph::init_runtime_context( symbolic_arg.name, symbolic_arg_primitive_dtype.to_string(), arr_primitive_dtype.to_string()); ctx.set_arg_ndarray(i, *arr); - } else if (symbolic_arg.tag == aot::ArgKind::kScalar || - symbolic_arg.tag == aot::ArgKind::kMatrix) { + } else if (symbolic_arg.tag == aot::ArgKind::kScalar) { TI_ASSERT(ival.tag == aot::ArgKind::kScalar); // Matrix args are flattened so they're same as scalars. int type_size = data_type_size(symbolic_arg.dtype()); diff --git a/taichi/codegen/llvm/codegen_llvm.cpp b/taichi/codegen/llvm/codegen_llvm.cpp index 28e95d8f729e5..2c2bea8285827 100644 --- a/taichi/codegen/llvm/codegen_llvm.cpp +++ b/taichi/codegen/llvm/codegen_llvm.cpp @@ -1898,10 +1898,11 @@ void TaskCodeGenLLVM::visit(ExternalPtrStmt *stmt) { (layout == ExternalArrayLayout::kAOS) ? 
num_array_args : 0; for (int i = 0; i < num_array_args; i++) { - auto raw_arg = - builder->CreateGEP(struct_type, llvm_val[stmt->base_ptr], - {tlctx->get_constant(0), tlctx->get_constant(0), - tlctx->get_constant(i)}); + auto raw_arg = builder->CreateGEP( + struct_type, llvm_val[stmt->base_ptr], + {tlctx->get_constant(0), + tlctx->get_constant(TypeFactory::SHAPE_POS_IN_NDARRAY), + tlctx->get_constant(i)}); raw_arg = builder->CreateLoad(tlctx->get_data_type(PrimitiveType::i32), raw_arg); sizes[i] = raw_arg; @@ -1971,16 +1972,8 @@ void TaskCodeGenLLVM::visit(ExternalPtrStmt *stmt) { void TaskCodeGenLLVM::visit(ExternalTensorShapeAlongAxisStmt *stmt) { const auto arg_id = stmt->arg_id; const auto axis = stmt->axis; - if (auto struct_type = current_callable->args_type->get_element_type({arg_id}) - ->cast()) { - // Is ndarray - llvm_val[stmt] = get_struct_arg({arg_id, 0, axis}, /*create_load=*/true); - } else { - // Is texture - llvm_val[stmt] = - call("RuntimeContext_get_extra_args", get_context(), - tlctx->get_constant(arg_id), tlctx->get_constant(axis)); - } + llvm_val[stmt] = get_struct_arg( + {arg_id, TypeFactory::SHAPE_POS_IN_NDARRAY, axis}, /*create_load=*/true); } std::string TaskCodeGenLLVM::init_offloaded_task_function(OffloadedStmt *stmt, diff --git a/taichi/codegen/spirv/kernel_utils.cpp b/taichi/codegen/spirv/kernel_utils.cpp index e7cd29c3ab95e..d485e3269c2cb 100644 --- a/taichi/codegen/spirv/kernel_utils.cpp +++ b/taichi/codegen/spirv/kernel_utils.cpp @@ -50,9 +50,7 @@ std::string TaskAttributes::BufferBind::debug_string() const { KernelContextAttributes::KernelContextAttributes( const Kernel &kernel, const DeviceCapabilityConfig *caps) - : args_bytes_(0), - rets_bytes_(0), - extra_args_bytes_(RuntimeContext::extra_args_size) { + : args_bytes_(0), rets_bytes_(0) { arr_access.resize(kernel.parameter_list.size(), irpass::ExternalPtrAccess(0)); arg_attribs_vec_.reserve(kernel.parameter_list.size()); // TODO: We should be able to limit Kernel args and rets to be primitive types diff --git a/taichi/codegen/spirv/kernel_utils.h b/taichi/codegen/spirv/kernel_utils.h index 53990125b8900..16fce6aa35fb0 100644 --- a/taichi/codegen/spirv/kernel_utils.h +++ b/taichi/codegen/spirv/kernel_utils.h @@ -229,23 +229,6 @@ class KernelContextAttributes { return rets_bytes_; } - /** - * Number of bytes needed by the extra arguments. - * - * Extra argument region is used to store some metadata, like the shape of the - * external array. - */ - inline size_t extra_args_bytes() const { - return extra_args_bytes_; - } - - /** - * Offset (in bytes) of the extra arguments in the memory. - */ - inline size_t extra_args_mem_offset() const { - return args_bytes(); - } - /** * The type of the struct that contains all the arguments. 
*/ @@ -266,7 +249,6 @@ class KernelContextAttributes { ret_attribs_vec_, args_bytes_, rets_bytes_, - extra_args_bytes_, arr_access, args_type_, rets_type_); @@ -277,7 +259,6 @@ class KernelContextAttributes { size_t args_bytes_{0}; size_t rets_bytes_{0}; - size_t extra_args_bytes_{0}; const lang::StructType *args_type_{nullptr}; const lang::StructType *rets_type_{nullptr}; diff --git a/taichi/codegen/spirv/spirv_codegen.cpp b/taichi/codegen/spirv/spirv_codegen.cpp index 8d266326ddeef..26c3cffd41447 100644 --- a/taichi/codegen/spirv/spirv_codegen.cpp +++ b/taichi/codegen/spirv/spirv_codegen.cpp @@ -633,30 +633,18 @@ class TaskCodegen : public IRVisitor { const auto arg_id = stmt->arg_id; const auto axis = stmt->axis; - const auto extra_args_member_index = ctx_attribs_->args().size(); - - const auto extra_arg_index = (arg_id * taichi_max_num_indices) + axis; spirv::Value var_ptr; - if (ctx_attribs_->args_type() - ->get_element_type({arg_id}) - ->is()) { - // Is ndarray - var_ptr = ir_->make_value( - spv::OpAccessChain, - ir_->get_pointer_type(ir_->i32_type(), spv::StorageClassUniform), - get_buffer_value(BufferType::Args, PrimitiveType::i32), - ir_->int_immediate_number(ir_->i32_type(), arg_id), - ir_->int_immediate_number(ir_->i32_type(), 0), - ir_->int_immediate_number(ir_->i32_type(), axis)); - } else { - // Is texture - var_ptr = ir_->make_value( - spv::OpAccessChain, - ir_->get_pointer_type(ir_->i32_type(), spv::StorageClassUniform), - get_buffer_value(BufferType::Args, PrimitiveType::i32), - ir_->int_immediate_number(ir_->i32_type(), - extra_args_member_index + extra_arg_index)); - } + TI_ASSERT(ctx_attribs_->args_type() + ->get_element_type({arg_id}) + ->is()); + var_ptr = ir_->make_value( + spv::OpAccessChain, + ir_->get_pointer_type(ir_->i32_type(), spv::StorageClassUniform), + get_buffer_value(BufferType::Args, PrimitiveType::i32), + ir_->int_immediate_number(ir_->i32_type(), arg_id), + ir_->int_immediate_number(ir_->i32_type(), + TypeFactory::SHAPE_POS_IN_NDARRAY), + ir_->int_immediate_number(ir_->i32_type(), axis)); spirv::Value var = ir_->load_variable(var_ptr, ir_->i32_type()); ir_->register_value(name, var); @@ -685,7 +673,8 @@ class TaskCodegen : public IRVisitor { ir_->get_pointer_type(ir_->i32_type(), spv::StorageClassUniform), get_buffer_value(BufferType::Args, PrimitiveType::i32), ir_->int_immediate_number(ir_->i32_type(), arg_id), - ir_->int_immediate_number(ir_->i32_type(), 0), + ir_->int_immediate_number(ir_->i32_type(), + TypeFactory::SHAPE_POS_IN_NDARRAY), ir_->int_immediate_number(ir_->i32_type(), i)); spirv::Value var = ir_->load_variable(var_ptr, ir_->i32_type()); ir_->register_value(var_name, var); @@ -2236,11 +2225,6 @@ class TaskCodegen : public IRVisitor { element_types.push_back( translate_ti_type(blk, element.type, has_buffer_ptr)); } - const tinyir::Type *i32_type = - blk.emplace_back(/*num_bits=*/32, /*is_signed=*/true); - for (int i = 0; i < ctx_attribs_->extra_args_bytes() / 4; i++) { - element_types.push_back(i32_type); - } const tinyir::Type *struct_type = blk.emplace_back(element_types); diff --git a/taichi/ir/type_factory.cpp b/taichi/ir/type_factory.cpp index 3e3e51e877c78..b63aee0662767 100644 --- a/taichi/ir/type_factory.cpp +++ b/taichi/ir/type_factory.cpp @@ -188,6 +188,10 @@ const Type *TypeFactory::get_ndarray_struct_type(DataType dt, return get_struct_type(members); } +const Type *TypeFactory::get_rwtexture_struct_type() { + return get_ndarray_struct_type(PrimitiveType::f32, 3); +} + namespace { static bool compare_types(DataType x, DataType 
y) { // Is the first type "bigger" than the second type? diff --git a/taichi/ir/type_factory.h b/taichi/ir/type_factory.h index 897644fa3fded..f73343c78519f 100644 --- a/taichi/ir/type_factory.h +++ b/taichi/ir/type_factory.h @@ -29,6 +29,8 @@ class TypeFactory { int total_dim, bool needs_grad = false); + const Type *get_rwtexture_struct_type(); + Type *get_pointer_type(Type *element, bool is_bit_pointer = false); Type *get_quant_int_type(int num_bits, bool is_signed, Type *compute_type); @@ -54,6 +56,7 @@ class TypeFactory { static DataType create_tensor_type(std::vector shape, DataType element); + constexpr static int SHAPE_POS_IN_NDARRAY = 0; constexpr static int DATA_PTR_POS_IN_NDARRAY = 1; constexpr static int GRAD_PTR_POS_IN_NDARRAY = 2; diff --git a/taichi/program/callable.cpp b/taichi/program/callable.cpp index b3af705acad64..384a3c31059a6 100644 --- a/taichi/program/callable.cpp +++ b/taichi/program/callable.cpp @@ -46,8 +46,11 @@ int Callable::insert_ndarray_param(const DataType &dt, int Callable::insert_texture_param(int total_dim, const std::string &name) { // FIXME: we shouldn't abuse is_array for texture parameters - parameter_list.emplace_back(PrimitiveType::f32, /*is_array=*/true, 0, - total_dim, std::vector{}); + // FIXME: using rwtexture struct type for texture parameters because C-API + // does not distinguish between texture and rwtexture. + auto *type = TypeFactory::get_instance().get_rwtexture_struct_type(); + parameter_list.emplace_back(type, /*is_array=*/true, 0, total_dim, + std::vector{}); parameter_list.back().name = name; return (int)parameter_list.size() - 1; } @@ -63,8 +66,9 @@ int Callable::insert_rw_texture_param(int total_dim, BufferFormat format, const std::string &name) { // FIXME: we shouldn't abuse is_array for texture parameters - parameter_list.emplace_back(PrimitiveType::f32, /*is_array=*/true, 0, - total_dim, std::vector{}, format); + auto *type = TypeFactory::get_instance().get_rwtexture_struct_type(); + parameter_list.emplace_back(type, /*is_array=*/true, 0, total_dim, + std::vector{}, format); parameter_list.back().name = name; return (int)parameter_list.size() - 1; } diff --git a/taichi/program/context.h b/taichi/program/context.h index 3b5a8412292f1..71ab23bc9644f 100644 --- a/taichi/program/context.h +++ b/taichi/program/context.h @@ -17,7 +17,6 @@ struct RuntimeContext { LLVMRuntime *runtime{nullptr}; - int32_t extra_args[taichi_max_num_args_extra][taichi_max_num_indices]; int32_t cpu_thread_id; // We move the pointer of result buffer from LLVMRuntime to RuntimeContext @@ -25,8 +24,6 @@ struct RuntimeContext { // LLVMRuntime is shared among functions. So we moved the pointer to // RuntimeContext which each function have one. 
uint64_t *result_buffer; - - static constexpr size_t extra_args_size = sizeof(extra_args); }; #if defined(TI_RUNTIME_HOST) diff --git a/taichi/program/launch_context_builder.cpp b/taichi/program/launch_context_builder.cpp index e57c8f21b4be0..1458ae81d6203 100644 --- a/taichi/program/launch_context_builder.cpp +++ b/taichi/program/launch_context_builder.cpp @@ -135,10 +135,6 @@ void LaunchContextBuilder::set_arg(int i, TypedConstant d) { } } -void LaunchContextBuilder::set_extra_arg_int(int i, int j, int32 d) { - ctx_->extra_args[i][j] = d; -} - template void LaunchContextBuilder::set_struct_arg_impl(std::vector arg_indices, T v) { @@ -257,8 +253,8 @@ void LaunchContextBuilder::set_arg_rw_texture_impl( array_ptrs[{arg_id}] = (void *)alloc_ptr; set_array_device_allocation_type(arg_id, DevAllocType::kRWTexture); TI_ASSERT(shape.size() <= taichi_max_num_indices); - for (int i = 0; i < shape.size(); i++) { - ctx_->extra_args[arg_id][i] = shape[i]; + for (int i = 0; i < shape.size(); ++i) { + set_struct_arg({arg_id, 0, i}, shape[i]); } } diff --git a/taichi/program/launch_context_builder.h b/taichi/program/launch_context_builder.h index 55d9f212fe746..ee0687d490b2c 100644 --- a/taichi/program/launch_context_builder.h +++ b/taichi/program/launch_context_builder.h @@ -57,7 +57,6 @@ class LaunchContextBuilder { template T get_ret(int i); - void set_extra_arg_int(int i, int j, int32 d); void set_arg_external_array_with_shape(int arg_id, uintptr_t ptr, diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp index a05658a18dbf1..04301f154e5d6 100644 --- a/taichi/python/export_lang.cpp +++ b/taichi/python/export_lang.cpp @@ -617,66 +617,55 @@ void export_lang(py::module &m) { .def("seq", &GraphBuilder::seq, py::return_value_policy::reference); py::class_(m, "CompiledGraph") - .def("jit_run", [](aot::CompiledGraph *self, - const CompileConfig &compile_config, - const py::dict &pyargs) { - std::unordered_map args; - for (auto it : pyargs) { - std::string arg_name = py::cast(it.first); - auto tag = self->args[arg_name].tag; - if (tag == aot::ArgKind::kNdarray) { - auto &val = it.second.cast(); - args.insert( - {py::cast(it.first), aot::IValue::create(val)}); - } else if (tag == aot::ArgKind::kTexture || - tag == aot::ArgKind::kRWTexture) { - auto &val = it.second.cast(); - args.insert( - {py::cast(it.first), aot::IValue::create(val)}); - - } else if (tag == aot::ArgKind::kScalar || - tag == aot::ArgKind::kMatrix) { - std::string arg_name = py::cast(it.first); - auto expected_dtype = self->args[arg_name].dtype(); - if (expected_dtype == PrimitiveType::i32) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::i64) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::f32) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::f64) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::i16) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::u32) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::u64) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::u16) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); 
- } else if (expected_dtype == PrimitiveType::u8) { - args.insert({arg_name, - aot::IValue::create(py::cast(it.second))}); - } else if (expected_dtype == PrimitiveType::i8) { - args.insert( - {arg_name, aot::IValue::create(py::cast(it.second))}); - } else { - TI_NOT_IMPLEMENTED; - } - } else { - TI_NOT_IMPLEMENTED; - } - } - self->jit_run(compile_config, args); - }); + .def("jit_run", + [](aot::CompiledGraph *self, const CompileConfig &compile_config, + const py::dict &pyargs) { + std::unordered_map args; + auto insert_scalar_arg = [&args](std::string arg_name, + DataType expected_dtype, + py::object pyarg) { + auto type_id = expected_dtype->as()->type; + switch (type_id) { +#define PER_C_TYPE(type, ctype) \ + case PrimitiveTypeID::type: \ + args.insert({arg_name, aot::IValue::create(py::cast(pyarg))}); \ + break; +#include "taichi/inc/data_type_with_c_type.inc.h" +#undef PER_C_TYPE + default: + TI_ERROR("Unsupported scalar type {}", type_id); + } + }; + for (const auto &[arg_name, arg] : self->args) { + auto tag = arg.tag; + if (tag == aot::ArgKind::kMatrix) { + int size = arg.element_shape[0] * arg.element_shape[1]; + for (int i = 0; i < size; i++) { + auto name = fmt::format("{}_{}", arg_name, i); + TI_ASSERT(pyargs.contains(name.c_str())); + auto pyarg = pyargs[name.c_str()]; + insert_scalar_arg(name, arg.dtype(), pyarg); + } + continue; + } + TI_ASSERT(pyargs.contains(arg_name.c_str())); + auto pyarg = pyargs[arg_name.c_str()]; + if (tag == aot::ArgKind::kNdarray) { + auto &val = pyarg.cast(); + args.insert({arg_name, aot::IValue::create(val)}); + } else if (tag == aot::ArgKind::kTexture || + tag == aot::ArgKind::kRWTexture) { + auto &val = pyarg.cast(); + args.insert({arg_name, aot::IValue::create(val)}); + } else if (tag == aot::ArgKind::kScalar) { + auto expected_dtype = arg.dtype(); + insert_scalar_arg(arg_name, expected_dtype, pyarg); + } else { + TI_NOT_IMPLEMENTED; + } + } + self->jit_run(compile_config, args); + }); py::class_(m, "Kernel") .def("no_activate", @@ -722,7 +711,6 @@ void export_lang(py::module &m) { &LaunchContextBuilder::set_arg_ndarray_with_grad) .def("set_arg_texture", &LaunchContextBuilder::set_arg_texture) .def("set_arg_rw_texture", &LaunchContextBuilder::set_arg_rw_texture) - .def("set_extra_arg_int", &LaunchContextBuilder::set_extra_arg_int) .def("get_struct_ret_int", &LaunchContextBuilder::get_struct_ret_int) .def("get_struct_ret_uint", &LaunchContextBuilder::get_struct_ret_uint) .def("get_struct_ret_float", &LaunchContextBuilder::get_struct_ret_float); diff --git a/taichi/runtime/gfx/runtime.cpp b/taichi/runtime/gfx/runtime.cpp index 1c028a1924979..c49a912226a36 100644 --- a/taichi/runtime/gfx/runtime.cpp +++ b/taichi/runtime/gfx/runtime.cpp @@ -93,11 +93,6 @@ class HostDeviceContextBlitter { std::memcpy(device_base, host_ctx_.get_context().arg_buffer, ctx_attribs_->args_bytes()); - void *device_ptr = - (uint8_t *)device_base + ctx_attribs_->extra_args_mem_offset(); - std::memcpy(device_ptr, host_ctx_.get_context().extra_args, - ctx_attribs_->extra_args_bytes()); - device_->unmap(*device_args_buffer_); } @@ -258,10 +253,6 @@ CompiledTaichiKernel::CompiledTaichiKernel(const Params &ti_params) args_buffer_size_ = arg_sz; ret_buffer_size_ = ret_sz; - if (arg_sz) { - args_buffer_size_ += ti_kernel_attribs_.ctx_attribs.extra_args_bytes(); - } - const auto &task_attribs = ti_kernel_attribs_.tasks_attribs; const auto &spirv_bins = ti_params.spirv_bins; TI_ASSERT(task_attribs.size() == spirv_bins.size()); @@ -802,15 +793,14 @@ std::pair 
GfxRuntime::get_struct_type_with_data_layout(const lang::StructType *old_ty, const std::string &layout) { auto [new_ty, size, align] = - get_struct_type_with_data_layout_impl(old_ty, layout, true); + get_struct_type_with_data_layout_impl(old_ty, layout); return {new_ty, size}; } std::tuple GfxRuntime::get_struct_type_with_data_layout_impl( const lang::StructType *old_ty, - const std::string &layout, - bool is_outmost) { + const std::string &layout) { TI_TRACE("get_struct_type_with_data_layout: {}", layout); TI_ASSERT(layout.size() == 2); auto is_430 = layout[0] == '4'; @@ -824,7 +814,7 @@ GfxRuntime::get_struct_type_with_data_layout_impl( size_t member_size; if (auto struct_type = member.type->cast()) { auto [new_ty, size, member_align_] = - get_struct_type_with_data_layout_impl(struct_type, layout, false); + get_struct_type_with_data_layout_impl(struct_type, layout); members[i].type = new_ty; member_align = member_align_; member_size = size; @@ -863,7 +853,7 @@ GfxRuntime::get_struct_type_with_data_layout_impl( if (!is_430) { align = align_up(align, sizeof(float) * 4); - bytes = align_up(bytes, is_outmost ? 4 : 4 * sizeof(float)); + bytes = align_up(bytes, 4 * sizeof(float)); } TI_TRACE(" total_bytes={}", bytes); return {TypeFactory::get_instance() diff --git a/taichi/runtime/gfx/runtime.h b/taichi/runtime/gfx/runtime.h index d52de3c2ae320..725ef0aa81394 100644 --- a/taichi/runtime/gfx/runtime.h +++ b/taichi/runtime/gfx/runtime.h @@ -135,8 +135,7 @@ class TI_DLL_EXPORT GfxRuntime { static std::tuple get_struct_type_with_data_layout_impl(const lang::StructType *old_ty, - const std::string &layout, - bool is_outmost); + const std::string &layout); private: friend class taichi::lang::gfx::SNodeTreeManager; diff --git a/taichi/runtime/llvm/runtime_module/runtime.cpp b/taichi/runtime/llvm/runtime_module/runtime.cpp index f7cb28443d824..8be6e4f69e8c4 100644 --- a/taichi/runtime/llvm/runtime_module/runtime.cpp +++ b/taichi/runtime/llvm/runtime_module/runtime.cpp @@ -288,10 +288,6 @@ STRUCT_FIELD_ARRAY(PhysicalCoordinates, val); STRUCT_FIELD(RuntimeContext, runtime); STRUCT_FIELD(RuntimeContext, result_buffer) -int32 RuntimeContext_get_extra_args(RuntimeContext *ctx, int32 i, int32 j) { - return ctx->extra_args[i][j]; -} - #include "taichi/runtime/llvm/runtime_module/atomic.h" // These structures are accessible by both the LLVM backend and this C++ runtime diff --git a/tests/cpp/aot/gfx_utils.cpp b/tests/cpp/aot/gfx_utils.cpp index e7e15b5fde434..9a0d965321316 100644 --- a/tests/cpp/aot/gfx_utils.cpp +++ b/tests/cpp/aot/gfx_utils.cpp @@ -172,7 +172,7 @@ void run_kernel_test1(Arch arch, taichi::lang::Device *device) { // Hack to set vector/matrix args std::vector vec = {1, 2, 3}; for (int i = 0; i < vec.size(); ++i) { - builder.set_arg(/*arg_id=*/i + 2, vec[i]); + builder.set_struct_arg(/*arg_indices=*/{2, i}, vec[i]); } k_run->launch(builder); gfx_runtime->synchronize(); diff --git a/tests/cpp/aot/llvm/kernel_aot_test.cpp b/tests/cpp/aot/llvm/kernel_aot_test.cpp index 4dcdce34fc23d..5446cb8219cff 100644 --- a/tests/cpp/aot/llvm/kernel_aot_test.cpp +++ b/tests/cpp/aot/llvm/kernel_aot_test.cpp @@ -54,7 +54,7 @@ TEST(LlvmAotTest, CpuKernel) { builder.set_arg_ndarray(/*arg_id=*/1, arr); std::vector vec = {1, 2, 3}; for (int i = 0; i < vec.size(); ++i) { - builder.set_arg(/*arg_id=*/i + 2, vec[i]); + builder.set_struct_arg(/*arg_indices=*/{2, i}, vec[i]); } k_run->launch(builder); @@ -100,7 +100,7 @@ TEST(LlvmAotTest, CudaKernel) { builder.set_arg_ndarray(/*arg_id=*/1, arr); std::vector vec = {1, 2, 
3}; for (int i = 0; i < vec.size(); ++i) { - builder.set_arg(/*arg_id=*/i + 2, vec[i]); + builder.set_struct_arg(/*arg_indices=*/{2, i}, vec[i]); } k_run->launch(builder); diff --git a/tests/python/test_argument.py b/tests/python/test_argument.py index 4a7139b1f328a..9d1c0d078e9ef 100644 --- a/tests/python/test_argument.py +++ b/tests/python/test_argument.py @@ -4,32 +4,6 @@ from tests import test_utils -@test_utils.test(exclude=[ti.opengl, ti.gles]) -def test_exceed_max_64(): - N = 64 - - @ti.kernel - def foo1(a: ti.types.vector(N, ti.i32)) -> ti.i32: - return a.sum() - - A = ti.Vector([1] * N) - assert foo1(A) == 64 - - N = 65 - - @ti.kernel - def foo2(a: ti.types.vector(N, ti.i32)) -> ti.i32: - return a.sum() - - A = ti.Vector([1] * N) - - with pytest.raises( - ti.TaichiRuntimeError, - match=f"The number of elements in kernel arguments is too big! Do not exceed 64 on {ti._lib.core.arch_name(ti.lang.impl.current_cfg().arch)} backend.", - ): - foo2(A) - - @test_utils.test(debug=True) def test_kernel_keyword_args(): @ti.kernel diff --git a/tests/python/test_deprecation.py b/tests/python/test_deprecation.py index c91455c0c9e8d..50fa6653f96e0 100644 --- a/tests/python/test_deprecation.py +++ b/tests/python/test_deprecation.py @@ -83,21 +83,6 @@ def test_deprecate_rwtexture_ndim(): ti.graph.Arg(ti.graph.ArgKind.RWTEXTURE, "x", shape=(128, 128), fmt=ti.Format.r32f) -@test_utils.test() -def test_deprecate_builtin_min_max(): - with pytest.warns( - DeprecationWarning, - match='Calling builtin function "max" in Taichi scope is deprecated, ' - "and it will be removed in Taichi v1.6.0.", - ): - - @ti.kernel - def func(): - max(1, 2) - - func() - - @test_utils.test() def test_remove_is_is_not(): with pytest.raises(ti.TaichiSyntaxError, match='Operator "is" in Taichi scope is not supported'): diff --git a/tests/python/test_native_functions.py b/tests/python/test_native_functions.py index 2176f19aecb9b..fa60291bfdb09 100644 --- a/tests/python/test_native_functions.py +++ b/tests/python/test_native_functions.py @@ -72,9 +72,7 @@ def func(): y[i] = N - i z[i] = i - 2 if i % 2 else i + 2 - with pytest.warns(DeprecationWarning, match="Calling builtin function") as records: - func() - assert len(records) > 0 + func() assert np.allclose( minimum.to_numpy(),