From 28550c83685d57381d9ede9d6ffb19723eaf9a01 Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Fri, 11 Aug 2023 16:54:17 +0200 Subject: [PATCH 01/12] Added wrapper generator from ocaml-torch 0.14 Copied from https://github.com/LaurentMazare/ocaml-torch/tree/a6499811f40282a071179d4306afbbb6023dcc4a/src/gen/gen.ml Also updated dune-project accordingly. --- deps/c_wrapper_generator/bin/dune | 3 +- deps/c_wrapper_generator/bin/main.ml | 355 +++++++++++++++--- deps/c_wrapper_generator/dune-project | 2 +- .../wrapper_generator.opam | 1 + 4 files changed, 303 insertions(+), 58 deletions(-) diff --git a/deps/c_wrapper_generator/bin/dune b/deps/c_wrapper_generator/bin/dune index 869412a7..0cfaea71 100644 --- a/deps/c_wrapper_generator/bin/dune +++ b/deps/c_wrapper_generator/bin/dune @@ -1,4 +1,5 @@ (executable (public_name wrapper_generator) (name main) - (libraries base stdio yaml)) + (libraries base stdio yaml) + (preprocess (pps ppx_string))) diff --git a/deps/c_wrapper_generator/bin/main.ml b/deps/c_wrapper_generator/bin/main.ml index 91b7d5a4..1c15ec2d 100644 --- a/deps/c_wrapper_generator/bin/main.ml +++ b/deps/c_wrapper_generator/bin/main.ml @@ -1,4 +1,4 @@ -(* Automatically generate the C++ -> C bindings. +(* Automatically generate the C++ -> C -> ocaml bindings. This takes as input the Descriptions.yaml file that gets generated when building PyTorch from source. *) @@ -22,12 +22,40 @@ let excluded_functions = ; "_cufft_set_plan_cache_max_size" ; "_cufft_clear_plan_cache" ; "backward" + ; "_backward" ; "set_data" + ; "_amp_non_finite_check_and_unscale_" + ; "_cummin_helper" + ; "_cummax_helper" + ; "retain_grad" + ; "_validate_sparse_coo_tensor_args" + ; "_validate_sparse_csr_tensor_args" + ; "count_nonzero" + ; "_assert_async" + ; "gradient" + ; "linalg_vector_norm" + ; "linalg_vector_norm_out" + ; "linalg_matrix_norm" + ; "linalg_matrix_norm_out" + ; "histogram" + ; "histogram_out" ] -let excluded_prefixes = [ "_"; "thnn_"; "th_" ] +let no_tensor_options = + Set.of_list + (module String) + [ "zeros_like" + ; "empty_like" + ; "full_like" + ; "ones_like" + ; "rand_like" + ; "randint_like" + ; "randn_like" + ] + +let excluded_prefixes = [ "thnn_"; "th_"; "_foreach"; "_amp_foreach"; "linalg_norm" ] let excluded_suffixes = [ "_forward"; "_forward_out" ] -let yaml_error yaml ~msg = Printf.failwithf "%s, %s" msg (Yaml.to_string_exn yaml) () +let yaml_error yaml ~msg = failwith [%string "%{msg}, %{Yaml.to_string_exn yaml}"] let extract_bool = function | `Bool b -> b @@ -58,11 +86,13 @@ module Func = struct | Tensor | TensorOption | IntList + | TensorOptList | TensorList | TensorOptions | Scalar | ScalarType | Device + | String type arg = { arg_name : string @@ -70,8 +100,31 @@ module Func = struct ; default_value : string option } + let ml_arg_type arg = + match arg.arg_type with + | Bool -> "bool" + | Int64 -> if String.( = ) arg.arg_name "reduction" then "Reduction.t" else "int" + | Double -> "float" + | Tensor -> "t" + | TensorOption -> "t option" + | IntList -> "int list" + | TensorList -> "t list" + | TensorOptList -> "t option list" + | TensorOptions -> "Kind.packed * Device.t" + | Scalar -> "'a scalar" + | ScalarType -> "Kind.packed" + | Device -> "Device.t" + | String -> "string" + + let named_arg arg = + match arg.arg_name with + | "self" | "other" | "result" | "input" | "tensor" | "tensors" -> false + | _ -> true + type t = { name : string + ; operator_name : string + ; overload_name : string ; args : arg list ; returns : (* number of tensors that are returned *) [ `fixed of 
int | `dynamic ] | "bool" -> Some Bool | "int64_t" -> Some Int64 | "double" -> Some Double - | "booltensor" | "indextensor" | "tensor" -> - Some (if is_nullable then TensorOption else Tensor) - | "tensoroptions" -> Some TensorOptions - | "intarrayref" | "intlist" -> Some IntList - | "tensorlist" -> Some TensorList - | "device" -> Some Device - | "scalar" -> Some Scalar - | "scalartype" -> Some ScalarType + | "at::tensor" -> Some (if is_nullable then TensorOption else Tensor) + | "at::tensoroptions" -> Some TensorOptions + | "at::intarrayref" | "intlist" -> Some IntList + | "const c10::list<c10::optional<tensor>> &" -> Some TensorOptList + | "at::tensorlist" -> Some TensorList + | "at::device" -> Some Device + | "at::scalar" | "const at::scalar &" -> Some Scalar + | "at::scalartype" -> Some ScalarType + | "c10::string_view" -> Some String | _ -> None let c_typed_args_list t = List.map t.args ~f:(fun { arg_name; arg_type; _ } -> match arg_type with - | IntList -> Printf.sprintf "int64_t *%s_data, int %s_len" arg_name arg_name - | TensorList -> Printf.sprintf "tensor *%s_data, int %s_len" arg_name arg_name - | TensorOptions -> Printf.sprintf "int %s_kind, int %s_device" arg_name arg_name + | IntList -> [%string "int64_t *%{arg_name}_data, int %{arg_name}_len"] + | TensorOptList | TensorList -> + [%string "tensor *%{arg_name}_data, int %{arg_name}_len"] + | TensorOptions -> [%string "int %{arg_name}_kind, int %{arg_name}_device"] | otherwise -> let simple_type_cstring = match otherwise with @@ -110,7 +165,8 @@ module Func = struct | ScalarType -> "int" | Device -> "int" | Scalar -> "scalar" - | IntList | TensorList | TensorOptions -> assert false + | String -> "char *" + | IntList | TensorOptList | TensorList | TensorOptions -> assert false in Printf.sprintf "%s %s" simple_type_cstring arg_name) |> String.concat ~sep:", " @@ -119,31 +175,88 @@ module Func = struct List.map args ~f:(fun { arg_name; arg_type; _ } -> match arg_type with | Scalar | Tensor -> "*" ^ arg_name - | TensorOption -> Printf.sprintf "(%s ? *%s : torch::Tensor())" arg_name arg_name + | TensorOption -> [%string "(%{arg_name} ?
*%{arg_name} : torch::Tensor())"] | Bool -> "(bool)" ^ arg_name - | IntList -> - Printf.sprintf "torch::IntArrayRef(%s_data, %s_len)" arg_name arg_name - | TensorList -> - Printf.sprintf "of_carray_tensor(%s_data, %s_len)" arg_name arg_name + | IntList -> [%string "torch::IntArrayRef(%{arg_name}_data, %{arg_name}_len)"] + | String -> [%string "std::string(%{arg_name})"] + | TensorList -> [%string "of_carray_tensor(%{arg_name}_data, %{arg_name}_len)"] + | TensorOptList -> + Printf.sprintf "of_carray_tensor_opt(%s_data, %s_len)" arg_name arg_name | TensorOptions -> - Printf.sprintf - "at::device(device_of_int(%s_device)).dtype(at::ScalarType(%s_kind))" - arg_name - arg_name - | ScalarType -> Printf.sprintf "torch::ScalarType(%s)" arg_name - | Device -> Printf.sprintf "device_of_int(%s)" arg_name + [%string + "at::device(device_of_int(%{arg_name}_device)).dtype(at::ScalarType(%{arg_name}_kind))"] + | ScalarType -> [%string "torch::ScalarType(%{arg_name})"] + | Device -> [%string "device_of_int(%{arg_name})"] | _ -> arg_name) |> String.concat ~sep:", " let c_call t = match t.kind with - | `function_ -> Printf.sprintf "torch::%s(%s)" t.name (c_args_list t.args) + | `function_ -> [%string "torch::%{t.name}(%{c_args_list t.args})"] | `method_ -> (match t.args with - | head :: tail -> - Printf.sprintf "%s->%s(%s)" head.arg_name t.name (c_args_list tail) + | head :: tail -> [%string "%{head.arg_name}->%{t.name}(%{c_args_list tail})"] | [] -> - Printf.failwithf "Method calls should have at least one argument %s" t.name ()) + failwith [%string "Method calls should have at least one argument %{t.name}"]) + + let stubs_signature t = + let args = + List.concat_map t.args ~f:(fun arg -> + match arg.arg_type with + | Bool -> [ "int" ] + | Int64 -> [ "int64_t" ] + | Double -> [ "double" ] + | Tensor -> [ "t" ] + | TensorOption -> [ "t" ] + | TensorOptions -> [ "int"; "int" ] + | ScalarType -> [ "int" ] + | Device -> [ "int" ] + | IntList -> [ "ptr int64_t"; "int" ] + | TensorOptList | TensorList -> [ "ptr t"; "int" ] + | String -> [ "string" ] + | Scalar -> [ "scalar" ]) + |> String.concat ~sep:" @-> " + in + match t.returns with + | `fixed _ -> [%string "ptr t @-> %{args} @-> returning void"] + | `dynamic -> [%string "%{args} @-> returning (ptr t)"] + + let replace_map = + Map.of_alist_exn (module String) [ "end", "end_"; "to", "to_"; "t", "tr" ] + + let caml_name name = + Map.find replace_map name |> Option.value ~default:name |> String.lowercase + + let caml_args t = + List.map t.args ~f:(fun arg -> + if named_arg arg then "~" ^ caml_name arg.arg_name else caml_name arg.arg_name) + |> String.concat ~sep:" " + + let caml_binding_args t = + List.map t.args ~f:(fun arg -> + let name = caml_name arg.arg_name in + match arg.arg_type with + | IntList -> + [%string + {|(List.map Int64.of_int %{name} |> CArray.of_list int64_t |> CArray.start) (List.length %{name})|}] + | TensorList -> + [%string "(CArray.of_list t %{name} |> CArray.start) (List.length %{name})"] + | TensorOptList -> + [%string + "(List.map (function Some x -> x | None -> null) %{name} |> CArray.of_list t \ + |> CArray.start) (List.length %{name})"] + | Bool -> [%string "(if %{name} then 1 else 0)"] + | ScalarType -> [%string "(Kind.packed_to_int %{name})"] + | TensorOptions -> + [%string "(Kind.packed_to_int (fst %{name})) (Device.to_int (snd %{name}))"] + | Device -> [%string "(Device.to_int %{name})"] + | Int64 -> + if String.( = ) name "reduction" + then "(Reduction.to_int reduction |> Int64.of_int)" + else [%string "(Int64.of_int %{name})"] 
+ | TensorOption -> [%string "(match %{name} with | Some v -> v | None -> null)"] + | _ -> name) |> String.concat ~sep:" " end exception Not_a_simple_arg @@ -160,6 +273,8 @@ let read_yaml filename = List.filter_map funcs ~f:(fun yaml -> let map = extract_map yaml in let name = Map.find_exn map "name" |> extract_string in + let operator_name = Map.find_exn map "operator_name" |> extract_string in + let overload_name = Map.find_exn map "overload_name" |> extract_string in let deprecated = Map.find_exn map "deprecated" |> extract_bool in let method_of = Map.find_exn map "method_of" |> extract_list |> List.map ~f:extract_string @@ -169,9 +284,7 @@ let read_yaml filename = let is_tensor returns = let returns = extract_map returns in let return_type = Map.find_exn returns "dynamic_type" |> extract_string in - String.( = ) return_type "Tensor" - || String.( = ) return_type "BoolTensor" - || String.( = ) return_type "IndexTensor" + String.( = ) return_type "at::Tensor" in let returns = Map.find_exn map "returns" |> extract_list in if List.for_all returns ~f:is_tensor @@ -182,7 +295,12 @@ let read_yaml filename = let return_type = Map.find_exn (extract_map returns) "dynamic_type" |> extract_string in - if String.( = ) return_type "TensorList" then Some `dynamic else None + if String.( = ) return_type "at::TensorList" + || String.( = ) + return_type + "dynamic_type: const c10::List<c10::optional<Tensor>> &" + then Some `dynamic + else None | [] | _ :: _ :: _ -> None) in let kind = @@ -219,13 +337,16 @@ let read_yaml filename = match Func.arg_type_of_string arg_type ~is_nullable with | Some Scalar when Option.is_some default_value && not is_nullable -> None + | Some TensorOptions + when Option.is_some default_value + && Set.mem no_tensor_options name -> None | Some arg_type -> Some { Func.arg_name; arg_type; default_value } | None -> if Option.is_some default_value then None else raise Not_a_simple_arg) in - Some { Func.name; args; returns; kind } + Some { Func.name; operator_name; overload_name; args; returns; kind } with | Not_a_simple_arg -> None) else None) @@ -250,27 +371,24 @@ let write_cpp funcs filename = let c_typed_args_list = Func.c_typed_args_list func in match func.returns with | `dynamic -> - pc "int atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; + pc "tensor *atg_%s(%s) {" exported_name c_typed_args_list; pc " PROTECT("; pc " auto outputs__ = %s;" (Func.c_call func); (* the returned type is a C++ vector of tensors *) pc " int sz = outputs__.size();"; pc - " // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * \ sizeof(torch::Tensor*));"; + " torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * \ sizeof(torch::Tensor*));"; pc " for (int i = 0; i < sz; ++i)"; pc " out__[i] = new torch::Tensor(outputs__[i]);"; pc " out__[sz] = nullptr;"; - pc " // return out__;"; - pc " return 0;"; - pc ")"; - pc "return 1;"; + pc " return out__;"; + pc " )"; pc "}"; pc ""; - ph "// tensor *atg_%s(%s);" exported_name c_typed_args_list; - ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list + ph "tensor *atg_%s(%s);" exported_name c_typed_args_list | `fixed ntensors -> - pc "int atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; + pc "void atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; pc " PROTECT("; pc " auto outputs__ = %s;" (Func.c_call func); if ntensors = 1 @@ -279,15 +397,120 @@ let write_cpp funcs filename = for i = 0 to ntensors - 1 do pc " out__[%d] = new torch::Tensor(std::get<%d>(outputs__));" i i done; - pc " return 0;"; - pc ")"; - pc "return 1;"; + pc " )"; pc "}"; pc "";
- ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list))) + ph "void atg_%s(tensor *, %s);" exported_name c_typed_args_list))) + +let write_stubs funcs filename = + Out_channel.with_file filename ~f:(fun out_channel -> + let p s = p out_channel s in + p "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! *)"; + p ""; + p "open Ctypes"; + p ""; + let funcs = Map.to_alist funcs |> List.chunks_of ~length:100 in + List.iteri funcs ~f:(fun idx funcs -> + p "module C%d(F: Cstubs.FOREIGN) = struct" idx; + p " open F"; + p " type t = unit ptr"; + p " let t : t typ = ptr void"; + p " type scalar = unit ptr"; + p " let scalar : scalar typ = ptr void"; + List.iter funcs ~f:(fun (exported_name, func) -> + p " let stubs_%s =" (Func.caml_name exported_name); + p " foreign \"atg_%s\"" exported_name; + p " (%s)" (Func.stubs_signature func); + p ""); + p "end"); + p "module C(F: Cstubs.FOREIGN) = struct"; + List.iteri funcs ~f:(fun idx _funcs -> p " include C%d(F)" idx); + p "end") + +let write_wrapper funcs filename = + Out_channel.with_file (filename ^ ".ml") ~f:(fun out_ml -> + Out_channel.with_file (filename ^ "_intf.ml") ~f:(fun out_intf -> + let pm s = p out_ml s in + let pi s = p out_intf s in + pm "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! *)"; + pm ""; + pm "open Ctypes"; + pm ""; + pm "module C = Torch_bindings.C(Torch_generated)"; + pm "open C.TensorG"; + pm ""; + pm "let to_tensor_list ptr ="; + pm " let rec loop ptr acc ="; + pm " let tensor = !@ptr in"; + pm " if is_null tensor"; + pm " then acc"; + pm " else begin"; + pm " Gc.finalise C.Tensor.free tensor;"; + pm " loop (ptr +@ 1) (tensor :: acc)"; + pm " end"; + pm " in"; + pm " let result = loop ptr [] in"; + pm " C.free (to_voidp ptr);"; + pm " List.rev result"; + pm ""; + pi "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
*)"; + pi ""; + pi "module type S = sig"; + pi " type t"; + pi " type _ scalar"; + pi ""; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let caml_name = Func.caml_name exported_name in + pm "let %s %s =" caml_name (Func.caml_args func); + (match func.returns with + | `fixed ntensors -> + pm " let out__ = CArray.make t %d in" ntensors; + pm + " stubs_%s (CArray.start out__) %s;" + caml_name + (Func.caml_binding_args func); + for i = 0 to ntensors - 1 do + pm " let t%d = CArray.get out__ %d in" i i; + pm " Gc.finalise C.Tensor.free t%d;" i + done; + pm + " %s" + (List.init ntensors ~f:(Printf.sprintf "t%d") |> String.concat ~sep:", ") + | `dynamic -> + pm + " stubs_%s %s |> to_tensor_list" + caml_name + (Func.caml_binding_args func)); + pm ""; + pi " val %s :" caml_name; + List.iter func.args ~f:(fun arg -> + let named_arg = + if Func.named_arg arg + then Printf.sprintf "%s:" (Func.caml_name arg.arg_name) + else "" + in + pi " %s%s ->" named_arg (Func.ml_arg_type arg)); + let returns = + match func.returns with + | `fixed 1 -> "t" + | `fixed ntensors -> + List.init ntensors ~f:(fun _ -> "t") |> String.concat ~sep:" * " + | `dynamic -> "t list" + in + pi " %s" returns; + pi ""); + pi "end")) let methods = - let c name args = { Func.name; args; returns = `fixed 1; kind = `method_ } in + let c name args = + { Func.name + ; operator_name = name + ; overload_name = "" + ; args + ; returns = `fixed 1 + ; kind = `method_ + } + in let ca arg_name arg_type = { Func.arg_name; arg_type; default_value = None } in [ c "grad" [ ca "self" Tensor ] ; c "set_requires_grad" [ ca "self" Tensor; ca "r" Bool ] @@ -295,13 +518,13 @@ let methods = ; c "to" [ ca "self" Tensor; ca "device" Device ] ] -let run ~yaml_filename ~cpp_filename = +let run ~yaml_filename ~cpp_filename ~stubs_filename ~wrapper_filename = let funcs = read_yaml yaml_filename in let funcs = methods @ funcs in printf "Generating code for %d functions.\n%!" (List.length funcs); (* Generate some unique names for overloaded functions. 
*) let funcs = - List.map funcs ~f:(fun func -> String.lowercase func.name, func) + List.map funcs ~f:(fun func -> String.lowercase func.operator_name, func) |> Map.of_alist_multi (module String) |> Map.to_alist |> List.concat_map ~f:(fun (name, funcs) -> @@ -309,15 +532,35 @@ let run ~yaml_filename ~cpp_filename = | [] -> assert false | [ func ] -> [ name, func ] | funcs -> + let has_empty_overload = + List.exists funcs ~f:(fun (func : Func.t) -> + String.is_empty func.overload_name) + in List.sort funcs ~compare:(fun (f1 : Func.t) (f2 : Func.t) -> - Int.compare (List.length f1.args) (List.length f2.args)) - |> List.mapi ~f:(fun i func -> - (if i = 0 then name else Printf.sprintf "%s%d" name i), func)) + match Int.compare (String.length f1.name) (String.length f2.name) with + | 0 -> Int.compare (List.length f1.args) (List.length f2.args) + | cmp -> cmp) + |> List.mapi ~f:(fun index (func : Func.t) -> + let operator_name = String.lowercase func.operator_name in + let overload_name = String.lowercase func.overload_name in + let name = + if String.is_empty overload_name + || (index = 0 && not has_empty_overload) + then operator_name + else if String.is_suffix operator_name ~suffix:"_" + then operator_name ^ overload_name ^ "_" + else operator_name ^ "_" ^ overload_name + in + name, func)) |> Map.of_alist_exn (module String) in - write_cpp funcs cpp_filename + write_cpp funcs cpp_filename; + write_stubs funcs stubs_filename; + write_wrapper funcs wrapper_filename let () = run ~yaml_filename:"data/Declarations.yaml" - ~cpp_filename:"../c_wrapper/torch_api_generated" + ~cpp_filename:"src/wrapper/torch_api_generated" + ~stubs_filename:"src/stubs/torch_bindings_generated.ml" + ~wrapper_filename:"src/wrapper/wrapper_generated" diff --git a/deps/c_wrapper_generator/dune-project b/deps/c_wrapper_generator/dune-project index e187f54c..7fc5c54a 100644 --- a/deps/c_wrapper_generator/dune-project +++ b/deps/c_wrapper_generator/dune-project @@ -19,7 +19,7 @@ (name wrapper_generator) (synopsis "A short synopsis") (description "A longer description") - (depends ocaml dune base stdio yaml) + (depends ocaml dune base ppx_string stdio yaml) (tags (topics "to describe" your project))) diff --git a/deps/c_wrapper_generator/wrapper_generator.opam b/deps/c_wrapper_generator/wrapper_generator.opam index b6830326..7637177b 100644 --- a/deps/c_wrapper_generator/wrapper_generator.opam +++ b/deps/c_wrapper_generator/wrapper_generator.opam @@ -13,6 +13,7 @@ depends: [ "ocaml" "dune" {>= "3.4"} "base" + "ppx_string" "stdio" "yaml" "odoc" {with-doc} From 57ded88d5c4ed3a3b8271e6eda18d0d293c6f07f Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Fri, 26 Aug 2022 19:14:30 +0200 Subject: [PATCH 02/12] Adapted wrapper_generator to produce equivalent code to existing Julia Torch wrapper --- deps/c_wrapper_generator/bin/main.ml | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/deps/c_wrapper_generator/bin/main.ml b/deps/c_wrapper_generator/bin/main.ml index 1c15ec2d..b7d57f58 100644 --- a/deps/c_wrapper_generator/bin/main.ml +++ b/deps/c_wrapper_generator/bin/main.ml @@ -371,24 +371,27 @@ let write_cpp funcs filename = let c_typed_args_list = Func.c_typed_args_list func in match func.returns with | `dynamic -> - pc "tensor *atg_%s(%s) {" exported_name c_typed_args_list; + pc "int atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; pc " PROTECT("; pc " auto outputs__ = %s;" (Func.c_call func); (* the returned type is a C++ vector of tensors *) pc " int sz =
outputs__.size();"; pc - " torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * \ + " // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * \ sizeof(torch::Tensor*));"; pc " for (int i = 0; i < sz; ++i)"; pc " out__[i] = new torch::Tensor(outputs__[i]);"; pc " out__[sz] = nullptr;"; - pc " return out__;"; - pc " )"; + pc " // return out__;"; + pc " return 0;"; + pc ")"; + pc "return 1;"; pc "}"; pc ""; - ph "tensor *atg_%s(%s);" exported_name c_typed_args_list + ph "// tensor *atg_%s(%s);" exported_name c_typed_args_list; + ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list | `fixed ntensors -> - pc "void atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; + pc "int atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list; pc " PROTECT("; pc " auto outputs__ = %s;" (Func.c_call func); if ntensors = 1 @@ -397,10 +400,12 @@ let write_cpp funcs filename = for i = 0 to ntensors - 1 do pc " out__[%d] = new torch::Tensor(std::get<%d>(outputs__));" i i done; - pc " )"; + pc " return 0;"; + pc ")"; + pc "return 1;"; pc "}"; pc ""; - ph "void atg_%s(tensor *, %s);" exported_name c_typed_args_list))) + ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list))) let write_stubs funcs filename = Out_channel.with_file filename ~f:(fun out_channel -> From f374bf66eb2fd67b6564d7ead5efba8bbb4f4966 Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Fri, 26 Aug 2022 19:37:21 +0200 Subject: [PATCH 03/12] Removed writing of OCaml stubs and wrapper --- deps/c_wrapper_generator/bin/main.ml | 191 +-------------------------- 1 file changed, 4 insertions(+), 187 deletions(-) diff --git a/deps/c_wrapper_generator/bin/main.ml b/deps/c_wrapper_generator/bin/main.ml index b7d57f58..495182a1 100644 --- a/deps/c_wrapper_generator/bin/main.ml +++ b/deps/c_wrapper_generator/bin/main.ml @@ -1,4 +1,4 @@ -(* Automatically generate the C++ -> C -> ocaml bindings. +(* Automatically generate the C++ -> C bindings. This takes as input the Descriptions.yaml file that gets generated when building PyTorch from source. 
*) @@ -100,27 +100,6 @@ module Func = struct ; default_value : string option } - let ml_arg_type arg = - match arg.arg_type with - | Bool -> "bool" - | Int64 -> if String.( = ) arg.arg_name "reduction" then "Reduction.t" else "int" - | Double -> "float" - | Tensor -> "t" - | TensorOption -> "t option" - | IntList -> "int list" - | TensorList -> "t list" - | TensorOptList -> "t option list" - | TensorOptions -> "Kind.packed * Device.t" - | Scalar -> "'a scalar" - | ScalarType -> "Kind.packed" - | Device -> "Device.t" - | String -> "string" - - let named_arg arg = - match arg.arg_name with - | "self" | "other" | "result" | "input" | "tensor" | "tensors" -> false - | _ -> true - type t = { name : string ; operator_name : string @@ -198,65 +177,6 @@ module Func = struct | head :: tail -> [%string "%{head.arg_name}->%{t.name}(%{c_args_list tail})"] | [] -> failwith [%string "Method calls should have at least one argument %{t.name}"]) - - let stubs_signature t = - let args = - List.concat_map t.args ~f:(fun arg -> - match arg.arg_type with - | Bool -> [ "int" ] - | Int64 -> [ "int64_t" ] - | Double -> [ "double" ] - | Tensor -> [ "t" ] - | TensorOption -> [ "t" ] - | TensorOptions -> [ "int"; "int" ] - | ScalarType -> [ "int" ] - | Device -> [ "int" ] - | IntList -> [ "ptr int64_t"; "int" ] - | TensorOptList | TensorList -> [ "ptr t"; "int" ] - | String -> [ "string" ] - | Scalar -> [ "scalar" ]) - |> String.concat ~sep:" @-> " - in - match t.returns with - | `fixed _ -> [%string "ptr t @-> %{args} @-> returning void"] - | `dynamic -> [%string "%{args} @-> returning (ptr t)"] - - let replace_map = - Map.of_alist_exn (module String) [ "end", "end_"; "to", "to_"; "t", "tr" ] - - let caml_name name = - Map.find replace_map name |> Option.value ~default:name |> String.lowercase - - let caml_args t = - List.map t.args ~f:(fun arg -> - if named_arg arg then "~" ^ caml_name arg.arg_name else caml_name arg.arg_name) - |> String.concat ~sep:" " - - let caml_binding_args t = - List.map t.args ~f:(fun arg -> - let name = caml_name arg.arg_name in - match arg.arg_type with - | IntList -> - [%string - {|(List.map Int64.of_int %{name} |> CArray.of_list int64_t |> CArray.start) (List.length %{name})|}] - | TensorList -> - [%string "(CArray.of_list t %{name} |> CArray.start) (List.length %{name})"] - | TensorOptList -> - [%string - "(List.map (function Some x -> x | None -> null) %{name} |> CArray.of_list t \ - |> CArray.start) (List.length %{name})"] - | Bool -> [%string "(if %{name} then 1 else 0)"] - | ScalarType -> [%string "(Kind.packed_to_int %{name})"] - | TensorOptions -> - [%string "(Kind.packed_to_int (fst %{name})) (Device.to_int (snd %{name}))"] - | Device -> [%string "(Device.to_int %{name})"] - | Int64 -> - if String.( = ) name "reduction" - then "(Reduction.to_int reduction |> Int64.of_int)" - else [%string "(Int64.of_int %{name})"] - | TensorOption -> [%string "(match %{name} with | Some v -> v | None -> null)"] - | _ -> name) - |> String.concat ~sep:" " end exception Not_a_simple_arg @@ -407,105 +327,6 @@ let write_cpp funcs filename = pc ""; ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list))) -let write_stubs funcs filename = - Out_channel.with_file filename ~f:(fun out_channel -> - let p s = p out_channel s in - p "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
*)"; - p ""; - p "open Ctypes"; - p ""; - let funcs = Map.to_alist funcs |> List.chunks_of ~length:100 in - List.iteri funcs ~f:(fun idx funcs -> - p "module C%d(F: Cstubs.FOREIGN) = struct" idx; - p " open F"; - p " type t = unit ptr"; - p " let t : t typ = ptr void"; - p " type scalar = unit ptr"; - p " let scalar : scalar typ = ptr void"; - List.iter funcs ~f:(fun (exported_name, func) -> - p " let stubs_%s =" (Func.caml_name exported_name); - p " foreign \"atg_%s\"" exported_name; - p " (%s)" (Func.stubs_signature func); - p ""); - p "end"); - p "module C(F: Cstubs.FOREIGN) = struct"; - List.iteri funcs ~f:(fun idx _funcs -> p " include C%d(F)" idx); - p "end") - -let write_wrapper funcs filename = - Out_channel.with_file (filename ^ ".ml") ~f:(fun out_ml -> - Out_channel.with_file (filename ^ "_intf.ml") ~f:(fun out_intf -> - let pm s = p out_ml s in - let pi s = p out_intf s in - pm "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! *)"; - pm ""; - pm "open Ctypes"; - pm ""; - pm "module C = Torch_bindings.C(Torch_generated)"; - pm "open C.TensorG"; - pm ""; - pm "let to_tensor_list ptr ="; - pm " let rec loop ptr acc ="; - pm " let tensor = !@ptr in"; - pm " if is_null tensor"; - pm " then acc"; - pm " else begin"; - pm " Gc.finalise C.Tensor.free tensor;"; - pm " loop (ptr +@ 1) (tensor :: acc)"; - pm " end"; - pm " in"; - pm " let result = loop ptr [] in"; - pm " C.free (to_voidp ptr);"; - pm " List.rev result"; - pm ""; - pi "(* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! *)"; - pi ""; - pi "module type S = sig"; - pi " type t"; - pi " type _ scalar"; - pi ""; - Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> - let caml_name = Func.caml_name exported_name in - pm "let %s %s =" caml_name (Func.caml_args func); - (match func.returns with - | `fixed ntensors -> - pm " let out__ = CArray.make t %d in" ntensors; - pm - " stubs_%s (CArray.start out__) %s;" - caml_name - (Func.caml_binding_args func); - for i = 0 to ntensors - 1 do - pm " let t%d = CArray.get out__ %d in" i i; - pm " Gc.finalise C.Tensor.free t%d;" i - done; - pm - " %s" - (List.init ntensors ~f:(Printf.sprintf "t%d") |> String.concat ~sep:", ") - | `dynamic -> - pm - " stubs_%s %s |> to_tensor_list" - caml_name - (Func.caml_binding_args func)); - pm ""; - pi " val %s :" caml_name; - List.iter func.args ~f:(fun arg -> - let named_arg = - if Func.named_arg arg - then Printf.sprintf "%s:" (Func.caml_name arg.arg_name) - else "" - in - pi " %s%s ->" named_arg (Func.ml_arg_type arg)); - let returns = - match func.returns with - | `fixed 1 -> "t" - | `fixed ntensors -> - List.init ntensors ~f:(fun _ -> "t") |> String.concat ~sep:" * " - | `dynamic -> "t list" - in - pi " %s" returns; - pi ""); - pi "end")) - let methods = let c name args = { Func.name @@ -523,7 +344,7 @@ let methods = ; c "to" [ ca "self" Tensor; ca "device" Device ] ] -let run ~yaml_filename ~cpp_filename ~stubs_filename ~wrapper_filename = +let run ~yaml_filename ~cpp_filename = let funcs = read_yaml yaml_filename in let funcs = methods @ funcs in printf "Generating code for %d functions.\n%!" 
(List.length funcs); @@ -559,13 +380,9 @@ let run ~yaml_filename ~cpp_filename ~stubs_filename ~wrapper_filename = name, func)) |> Map.of_alist_exn (module String) in - write_cpp funcs cpp_filename; - write_stubs funcs stubs_filename; - write_wrapper funcs wrapper_filename + write_cpp funcs cpp_filename let () = run ~yaml_filename:"data/Declarations.yaml" - ~cpp_filename:"src/wrapper/torch_api_generated" - ~stubs_filename:"src/stubs/torch_bindings_generated.ml" - ~wrapper_filename:"src/wrapper/wrapper_generated" + ~cpp_filename:"../c_wrapper/torch_api_generated" From 7828132d2469027e58512ee5eae21753c0a7c66c Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Thu, 16 Nov 2023 04:00:05 +0100 Subject: [PATCH 04/12] Changed torch_api.cpp to reduce diff --- deps/c_wrapper/torch_api.cpp | 224 +++++++++++++++-------------------- 1 file changed, 98 insertions(+), 126 deletions(-) diff --git a/deps/c_wrapper/torch_api.cpp b/deps/c_wrapper/torch_api.cpp index 0890fa17..cbb828a4 100644 --- a/deps/c_wrapper/torch_api.cpp +++ b/deps/c_wrapper/torch_api.cpp @@ -4,11 +4,10 @@ #include #include #include -// #include -// #include #include "torch_api.h" #define caml_invalid_argument printf + using namespace std; int get_last_error(char *err) { @@ -23,7 +22,7 @@ int flush_error() { myerr = ""; return 0; ) -return 1; + return 1; } int at_manual_seed(int64_t seed) { @@ -31,7 +30,7 @@ int at_manual_seed(int64_t seed) { torch::manual_seed(seed); return 0; ) -return 1; + return 1; } vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) { @@ -40,6 +39,11 @@ vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) { return result; } +at::Device device_of_int(int d) { + if (d < 0) return at::Device(at::kCPU); + return at::Device(at::kCUDA, /*index=*/d); +} + int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev) { PROTECT( auto options = torch::TensorOptions().device(torch::kCUDA, dev).requires_grad(false); @@ -47,8 +51,7 @@ int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *s out__[0] = new torch::Tensor(tens); return 0; ) - // return nullptr; -return 1; + return 1; } int at_new_tensor(tensor *out__) { @@ -56,8 +59,7 @@ int at_new_tensor(tensor *out__) { out__[0] = new torch::Tensor(); return 0; ) - // return nullptr; -return 1; + return 1; } int at_empty_cache() { @@ -65,7 +67,7 @@ int at_empty_cache() { c10::cuda::CUDACachingAllocator::emptyCache(); return 0; ) -return 1; + return 1; } int at_no_grad(int flag) { @@ -73,8 +75,7 @@ int at_no_grad(int flag) { torch::GradMode::set_enabled((bool)flag); return 0; ) - // return flag; -return 1; + return 1; } int at_sync() { @@ -84,7 +85,7 @@ int at_sync() { return 0; ) // torch::cuda::synchronize(); -return 1; + return 1; } int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { @@ -124,7 +125,7 @@ int at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) } return 0; ) -return 1; + return 1; } int at_float_vec(tensor *out__, double *vs, int len, int type) { @@ -134,8 +135,7 @@ int at_float_vec(tensor *out__, double *vs, int len, int type) { out__[0] = new torch::Tensor(tensor); return 0; ) - // return nullptr; -return 1; + return 1; } int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { @@ -145,26 +145,23 @@ int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { out__[0] = new torch::Tensor(tensor); return 0; ) - // return nullptr; -return 1; + return 1; } -int at_defined(int *i, tensor
t) { +int at_defined(int *out__, tensor t) { PROTECT( - i[0] = t->defined(); + out__[0] = t->defined(); return 0; ) - // return -1; -return 1; + return 1; } -int at_dim(int *i, tensor t) { +int at_dim(int *out__, tensor t) { PROTECT( - i[0] = t->dim(); + out__[0] = t->dim(); return 0; ) - // return -1; -return 1; + return 1; } int at_shape(tensor t, int *dims) { @@ -173,15 +170,15 @@ int at_shape(tensor t, int *dims) { for (int dim : t->sizes()) dims[i++] = dim; return 0; ) -return 1; + return 1; } -int at_scalar_type(int *i, tensor t) { +int at_scalar_type(int *out__, tensor t) { PROTECT( - i[0] = static_cast<int>(t->scalar_type()); + out__[0] = static_cast<int>(t->scalar_type()); return 0; ) -return 1; + return 1; } int at_backward(tensor t, int keep_graph, int create_graph) { @@ -189,16 +186,15 @@ int at_backward(tensor t, int keep_graph, int create_graph) { t->backward({}, keep_graph, create_graph); return 0; ) -return 1; + return 1; } -int at_requires_grad(int *i, tensor t) { +int at_requires_grad(int *out__, tensor t) { PROTECT( - i[0] = t->requires_grad(); + out__[0] = t->requires_grad(); return 0; ) - // return -1; -return 1; + return 1; } int at_grad_set_enabled(int b) { @@ -207,8 +203,7 @@ int at_grad_set_enabled(int b) { torch::autograd::GradMode::set_enabled(b); return 0; ) - // return -1; -return 1; + return 1; } int at_get(tensor *out__, tensor t, int index) { @@ -216,8 +211,7 @@ int at_get(tensor *out__, tensor t, int index) { out__[0] = new torch::Tensor((*t)[index]); return 0; ) - // return nullptr; -return 1; + return 1; } template<typename T> T at_value_at_indexes(tensor t, int *indexes, int indexes_len) { @@ -232,20 +226,20 @@ T at_value_at_indexes(tensor t, int *indexes, int indexes_len) { return T(); } -int at_double_value_at_indexes(double *i, tensor t, int *indexes, int indexes_len) { - PROTECT( - i[0] = at_value_at_indexes<double>(t, indexes, indexes_len); - return 0; +int at_double_value_at_indexes(double *out__, tensor t, int *indexes, int indexes_len) { + PROTECT( + out__[0] = at_value_at_indexes<double>(t, indexes, indexes_len); + return 0; ) -return 1; + return 1; } -int at_int64_value_at_indexes(int64_t *i, tensor t, int *indexes, int indexes_len) { +int at_int64_value_at_indexes(int64_t *out__, tensor t, int *indexes, int indexes_len) { PROTECT( - i[0] = at_value_at_indexes<int64_t>(t, indexes, indexes_len); + out__[0] = at_value_at_indexes<int64_t>(t, indexes, indexes_len); return 0; ) -return 1; + return 1; } template<typename T> @@ -258,7 +252,7 @@ int at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) { tensor.fill_(v); return 0; ) -return 1; + return 1; } int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) { @@ -274,15 +268,15 @@ int at_fill_double(tensor t, double v) { t->fill_(v); return 0; ) -return 1; + return 1; } int at_fill_int64(tensor t, int64_t v) { PROTECT( t->fill_(v); - return 0; + return 0; ) -return 1; + return 1; } int at_print(tensor t) { @@ -291,7 +285,7 @@ int at_print(tensor t) { cout << *tensor << endl; return 0; ) -return 1; + return 1; } // char *at_to_string(tensor t, int line_size) { @@ -308,7 +302,7 @@ int at_copy_(tensor dst, tensor src) { dst->copy_(*src); return 0; ) -return 1; + return 1; } int at_save(tensor t, char *filename) { @@ -316,7 +310,7 @@ int at_save(tensor t, char *filename) { torch::save(*t, filename); return 0; ) -return 1; + return 1; } int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { @@ -327,7 +321,7 @@ int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *file archive.save_to(filename); return 0; ) -return 1; + return 1; }
int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { @@ -341,9 +335,9 @@ int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *file // [read], no memory has to be freed. for (int i = 0; i < ntensors; ++i) tensors[i] = new torch::Tensor(ts[i]); - return 0; + return 0; ) -return 1; + return 1; } int at_load_callback(char *filename, void (*f)(char *, tensor)) { @@ -355,7 +349,7 @@ int at_load_callback(char *filename, void (*f)(char *, tensor)) { } return 0; ) -return 1; + return 1; } int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) { @@ -372,9 +366,9 @@ int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *fil tensors[i]->copy_(tmp_tensor); } } - return 0; + return 0; ) -return 1; + return 1; } int at_load(char *filename, tensor *out__) { @@ -384,8 +378,7 @@ int at_load(char *filename, tensor *out__) { out__[0] = new torch::Tensor(tensor); return 0; ) - // return nullptr; -return 1; + return 1; } int at_free(tensor t) { @@ -393,7 +386,7 @@ int at_free(tensor t) { delete(t); return 0; ) -return 1; + return 1; } int at_run_backward(tensor *tensors, @@ -424,9 +417,9 @@ int at_run_backward(tensor *tensors, for (int i = 0; i < ninputs; ++i) { outputs[i] = static_cast<tensor>(new torch::autograd::Variable(vl[i])); } - return 0; + return 0; ) -return 1; + return 1; } int ato_adam(optimizer *out__, double learning_rate, @@ -442,8 +435,7 @@ int ato_adam(optimizer *out__, double learning_rate, out__[0] = new torch::optim::Adam(vector<torch::Tensor>(), options); return 0; ) - // return nullptr; -return 1; + return 1; } int ato_rmsprop(optimizer *out__, double learning_rate, @@ -460,11 +452,10 @@ int ato_rmsprop(optimizer *out__, double learning_rate, .weight_decay(weight_decay) .momentum(momentum) .centered(centered != 0); - out__[0] = new torch::optim::RMSprop(vector<torch::Tensor>(), options); + out__[0] = new torch::optim::RMSprop(vector<torch::Tensor>(), options); return 0; - ) - // return nullptr; -return 1; + ) + return 1; } int ato_sgd(optimizer *out__, double learning_rate, @@ -482,8 +473,7 @@ int ato_sgd(optimizer *out__, double learning_rate, out__[0] = new torch::optim::SGD(vector<torch::Tensor>(), options); return 0; ) - // return nullptr; -return 1; + return 1; } int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { @@ -491,7 +481,7 @@ int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { t->add_parameters(of_carray_tensor(tensors, ntensors)); return 0; ) -return 1; + return 1; } int ato_set_learning_rate(optimizer t, double learning_rate) { @@ -503,10 +493,10 @@ int ato_set_learning_rate(optimizer t, double learning_rate) { else if (auto sgd = dynamic_cast<torch::optim::SGD*>(t)) sgd->options.learning_rate(learning_rate); else - caml_invalid_argument("unexpected optimizer"); + caml_invalid_argument("unexpected optimizer"); return 0; ) -return 1; + return 1; } int ato_set_momentum(optimizer t, double momentum) { @@ -518,10 +508,10 @@ int ato_set_momentum(optimizer t, double momentum) { else if (auto sgd = dynamic_cast<torch::optim::SGD*>(t)) sgd->options.momentum(momentum); else - caml_invalid_argument("unexpected optimizer"); + caml_invalid_argument("unexpected optimizer"); return 0; ) -return 1; + return 1; } int ato_zero_grad(optimizer t) { @@ -529,7 +519,7 @@ int ato_zero_grad(optimizer t) { t->zero_grad(); return 0; ) -return 1; + return 1; } int ato_step(optimizer t) { @@ -537,7 +527,7 @@ int ato_step(optimizer t) { t->step(); return 0; ) -return 1; + return 1; } int ato_free(optimizer t) { @@ -545,7 +535,7 @@ int ato_free(optimizer t) {
delete(t); return 0; ) -return 1; + return 1; } int ats_int(scalar *out__, int64_t v) { @@ -553,8 +543,7 @@ int ats_int(scalar *out__, int64_t v) { out__[0] = new torch::Scalar(v); return 0; ) - // return nullptr; -return 1; + return 1; } int ats_float(scalar *out__, double v) { @@ -562,8 +551,7 @@ int ats_float(scalar *out__, double v) { out__[0] = new torch::Scalar(v); return 0; ) - // return nullptr; -return 1; + return 1; } int ats_free(scalar s) { @@ -571,34 +559,31 @@ int ats_free(scalar s) { delete(s); return 0; ) -return 1; + return 1; } -int atc_cuda_device_count(int *i) { +int atc_cuda_device_count(int *out__) { PROTECT( - i[0] = torch::cuda::device_count(); + out__[0] = torch::cuda::device_count(); return 0; ) - // return -1; -return 1; + return 1; } -int atc_cuda_is_available(int *i) { +int atc_cuda_is_available(int *out__) { PROTECT( - i[0] = torch::cuda::is_available(); + out__[0] = torch::cuda::is_available(); return 0; ) - // return -1; -return 1; + return 1; } -int atc_cudnn_is_available(int *i) { +int atc_cudnn_is_available(int *out__) { PROTECT( - i[0] = torch::cuda::cudnn_is_available(); + out__[0] = torch::cuda::cudnn_is_available(); return 0; ) - // return -1; -return 1; + return 1; } int atc_set_benchmark_cudnn(int b) { @@ -606,7 +591,7 @@ int atc_set_benchmark_cudnn(int b) { at::globalContext().setBenchmarkCuDNN(b); return 0; ) -return 1; + return 1; } int atm_load(char *filename, module *out__) { @@ -614,8 +599,7 @@ int atm_load(char *filename, module *out__) { out__[0] = new torch::jit::script::Module(torch::jit::load(filename)); return 0; ) - // return nullptr; -return 1; + return 1; } int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { @@ -631,8 +615,7 @@ int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { out__[0] = new torch::Tensor(output.toTensor()); return 0; ) - // return nullptr; -return 1; + return 1; } int atm_forward_(ivalue *out__, module m, @@ -646,8 +629,7 @@ int atm_forward_(ivalue *out__, module m, out__[0] = new torch::jit::IValue(output); return 0; ) - // return nullptr; -return 1; + return 1; } int atm_free(module m) { @@ -655,7 +637,7 @@ int atm_free(module m) { delete(m); return 0; ) -return 1; + return 1; } int ati_tensor(ivalue *out__, tensor t) { @@ -663,8 +645,7 @@ int ati_tensor(ivalue *out__, tensor t) { out__[0] = new torch::jit::IValue(*t); return 0; ) - // return nullptr; -return 1; + return 1; } int ati_int(ivalue *out__, int64_t i) { @@ -672,8 +653,7 @@ int ati_int(ivalue *out__, int64_t i) { out__[0] = new torch::jit::IValue(i); return 0; ) - // return nullptr; -return 1; + return 1; } int ati_double(ivalue *out__, double d) { @@ -681,8 +661,7 @@ int ati_double(ivalue *out__, double d) { out__[0] = new torch::jit::IValue(d); return 0; ) - // return nullptr; -return 1; + return 1; } int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { @@ -692,8 +671,7 @@ int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { out__[0] = new torch::jit::IValue(torch::ivalue::Tuple::create(vec)); return 0; ) - // return nullptr; -return 1; + return 1; } int ati_tag(int *out__, ivalue i) { @@ -706,7 +684,7 @@ int ati_tag(int *out__, ivalue i) { return 0; ) myerr = strdup(("unsupported tag" + i->tagKind()).c_str()); -return 1; + return 1; } int ati_to_int(int64_t *out__, ivalue i) { @@ -714,7 +692,7 @@ int ati_to_int(int64_t *out__, ivalue i) { out__[0] = i->toInt(); return 0; ) -return 1; + return 1; } int ati_to_double(double *out__, ivalue i) { @@ -722,7 +700,7 @@ int ati_to_double(double *out__, ivalue 
i) { out__[0] = i->toDouble(); return 0; ) -return 1; + return 1; } int ati_to_tensor(tensor *out__, ivalue i) { @@ -730,8 +708,7 @@ int ati_to_tensor(tensor *out__, ivalue i) { out__[0] = new torch::Tensor(i->toTensor()); return 0; ) - // return nullptr; -return 1; + return 1; } @@ -740,7 +717,7 @@ int ati_tuple_length(int *out__, ivalue i) { out__[0] = i->toTuple()->elements().size(); return 0; ) -return 1; + return 1; } int ati_to_tuple(ivalue i, @@ -756,7 +733,7 @@ int ati_to_tuple(ivalue i, outputs[i] = new torch::jit::IValue(vec[i]); return 0; ) -return 1; + return 1; } @@ -765,12 +742,7 @@ int ati_free(ivalue i) { delete(i); return 0; ) -return 1; -} - -at::Device device_of_int(int d) { - if (d < 0) return at::Device(at::kCPU); - return at::Device(at::kCUDA, /*index=*/d); + return 1; } #include "torch_api_generated.cpp.h" From 0b551eacb261229c4e4fb21c2297773cd5409269 Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Fri, 16 Dec 2022 16:38:56 +0100 Subject: [PATCH 05/12] Added wrapper from ocaml-torch 0.14 Copied from https://github.com/LaurentMazare/ocaml-torch/tree/a6499811f40282a071179d4306afbbb6023dcc4a/src/wrapper --- deps/c_wrapper/torch_api.cpp | 909 +++++++++++++++++++++-------------- deps/c_wrapper/torch_api.h | 187 +++---- 2 files changed, 644 insertions(+), 452 deletions(-) diff --git a/deps/c_wrapper/torch_api.cpp b/deps/c_wrapper/torch_api.cpp index cbb828a4..84f6ccc5 100644 --- a/deps/c_wrapper/torch_api.cpp +++ b/deps/c_wrapper/torch_api.cpp @@ -1,36 +1,15 @@ #include #include +#include #include -#include -#include #include +#include #include "torch_api.h" -#define caml_invalid_argument printf - using namespace std; -int get_last_error(char *err) { - int len = strlen(myerr); - for (int i = 0; i < len; ++i) err[i] = myerr[i]; - err[len] = '\0'; - return 0; -} - -int flush_error() { - PROTECT( - myerr = ""; - return 0; - ) -return 1; -} - -int at_manual_seed(int64_t seed) { - PROTECT( - torch::manual_seed(seed); - return 0; - ) -return 1; +void at_manual_seed(int64_t seed) { + torch::manual_seed(seed); } vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) { @@ -40,6 +19,14 @@ vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) { return result; } +c10::List<c10::optional<torch::Tensor>> of_carray_tensor_opt(torch::Tensor **vs, int len) { + vector<c10::optional<torch::Tensor>> result; + for (int i = 0; i < len; ++i) { + result.push_back(vs[i] != nullptr ?
c10::optional<torch::Tensor>(*(vs[i])) : c10::nullopt); + } + return c10::List<c10::optional<torch::Tensor>>(result); +} + at::Device device_of_int(int d) { if (d < 0) return at::Device(at::kCPU); return at::Device(at::kCUDA, /*index=*/d); } -int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev) { - PROTECT( - auto options = torch::TensorOptions().device(torch::kCUDA, dev).requires_grad(false); - torch::Tensor tens = torch::from_blob(data, torch::IntArrayRef(dims, ndims), torch::IntArrayRef(strides, nstrides), options); - out__[0] = new torch::Tensor(tens); - return 0; - ) - return 1; -} - -int at_new_tensor(tensor *out__) { +tensor at_new_tensor() { PROTECT( - out__[0] = new torch::Tensor(); - return 0; + return new torch::Tensor(); ) - return 1; + return nullptr; } -int at_empty_cache() { - PROTECT( - c10::cuda::CUDACachingAllocator::emptyCache(); - return 0; - ) - return 1; -} - -int at_no_grad(int flag) { - PROTECT( - torch::GradMode::set_enabled((bool)flag); - return 0; - ) - return 1; -} - -int at_sync() { +tensor at_tensor_of_data(void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { PROTECT( - at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); - C10_CUDA_CHECK(cudaStreamSynchronize(stream)); - return 0; + torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type)); + if (element_size_in_bytes != tensor.element_size()) + caml_failwith("incoherent element sizes in bytes"); + void *tensor_data = tensor.data_ptr(); + memcpy(tensor_data, vs, tensor.numel() * element_size_in_bytes); + return new torch::Tensor(tensor); ) - // torch::cuda::synchronize(); - return 1; + return nullptr; } -int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { +void at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) { PROTECT( - // auto options = torch::TensorOptions().dtype(torch::ScalarType(type)).requires_grad(false); torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type)); - if (element_size_in_bytes != tensor.element_size()) { - myerr = strdup("incoherent element sizes in bytes"); - return 1; - } + if (elt_size_in_bytes != tensor->element_size()) + caml_failwith("incoherent element sizes in bytes"); + if (numel != tensor->numel()) + caml_failwith("incoherent number of elements"); if (tensor->device().type() != at::kCPU) { - torch::Tensor tmp_tensor = tensor->to(at::kCPU); - void *tensor_data = tmp_tensor.contiguous().data_ptr(); + torch::Tensor tmp_tensor = tensor->to(at::kCPU).contiguous(); + void *tensor_data = tmp_tensor.data_ptr(); memcpy(vs, tensor_data, numel * elt_size_in_bytes); } else { - void *tensor_data = tensor->contiguous().data_ptr(); + torch::Tensor tmp_tensor = tensor->contiguous(); + void *tensor_data = tmp_tensor.data_ptr(); memcpy(vs, tensor_data, numel * elt_size_in_bytes); } - return 0; ) - return 1; } -int at_float_vec(tensor *out__, double *vs, int len, int type) { +tensor at_float_vec(double *vs, int len, int type) { PROTECT( torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type)); for (int i = 0; i < len; ++i) tensor[i] = vs[i];
return new torch::Tensor(tensor); ) - return 1; + return nullptr; } -int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { +tensor at_int_vec(int64_t *vs, int len, int type) { PROTECT( torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type)); for (int i = 0; i < len; ++i) tensor[i] = vs[i]; - out__[0] = new torch::Tensor(tensor); - return 0; + return new torch::Tensor(tensor); + ) + return nullptr; +} + +int at_defined(tensor t) { + PROTECT(return t->defined();) + return -1; +} + +int at_is_sparse(tensor t) { + PROTECT(return t->is_sparse();) + return -1; +} + +int at_dim(tensor t) { + PROTECT(return t->dim();) + return -1; +} + +void at_shape(tensor t, int *dims) { + PROTECT( + int i = 0; + for (int dim : t->sizes()) dims[i++] = dim; ) - return 1; } -int at_defined(int *out__, tensor t) { +void at_stride(tensor t, int64_t *dims) { PROTECT( - out__[0] = t->defined(); - return 0; + int i = 0; + for (int64_t dim: t->strides()) dims[i++] = dim; ) - return 1; } -int at_dim(int *out__, tensor t) { +int at_scalar_type(tensor t) { PROTECT( - out__[0] = t->dim(); - return 0; + return static_cast<int>(t->scalar_type()); ) - return 1; } -int at_shape(tensor t, int *dims) { +void at_autocast_clear_cache() { + at::autocast::clear_cache(); +} + +int at_autocast_decrement_nesting() { PROTECT( - int i = 0; - for (int dim : t->sizes()) dims[i++] = dim; - return 0; + return at::autocast::decrement_nesting(); ) - return 1; + return -1; } -int at_scalar_type(int *out__, tensor t) { +int at_autocast_increment_nesting() { PROTECT( - out__[0] = static_cast<int>(t->scalar_type()); - return 0; + return at::autocast::increment_nesting(); ) - return 1; + return -1; } -int at_backward(tensor t, int keep_graph, int create_graph) { +int at_autocast_is_enabled() { PROTECT( - t->backward({}, keep_graph, create_graph); - return 0; + return at::autocast::is_enabled(); ) - return 1; + return -1; } -int at_requires_grad(int *out__, tensor t) { +int at_autocast_set_enabled(int b) { PROTECT( - out__[0] = t->requires_grad(); - return 0; + bool is_enabled = at::autocast::is_enabled(); + at::autocast::set_enabled(b); + return is_enabled; + ) + return -1; +} + +int at_device(tensor tensor) { + PROTECT ( + auto device = tensor->device(); + if (device.is_cpu()) return -1; + return device.index(); ) - return 1; +} + +void at_backward(tensor t, int keep_graph, int create_graph) { + PROTECT(t->backward({}, keep_graph, create_graph);) +} + +int at_requires_grad(tensor t) { + PROTECT(return t->requires_grad();) + return -1; } int at_grad_set_enabled(int b) { PROTECT( bool is_enabled = torch::autograd::GradMode::is_enabled(); torch::autograd::GradMode::set_enabled(b); - return 0; + return is_enabled; ) - return 1; + return -1; } -int at_get(tensor *out__, tensor t, int index) { - PROTECT( - out__[0] = new torch::Tensor((*t)[index]); - return 0; - ) - return 1; +tensor at_get(tensor t, int index) { + PROTECT(return new torch::Tensor((*t)[index]);) + return nullptr; } template<typename T> @@ -226,105 +199,77 @@ T at_value_at_indexes(tensor t, int *indexes, int indexes_len) { return T(); } -int at_double_value_at_indexes(double *out__, tensor t, int *indexes, int indexes_len) { - PROTECT( - out__[0] = at_value_at_indexes<double>(t, indexes, indexes_len); - return 0; - ) - return 1; +double at_double_value_at_indexes(tensor t, int *indexes, int indexes_len) { + return at_value_at_indexes<double>(t, indexes, indexes_len); } -int at_int64_value_at_indexes(int64_t *out__, tensor t, int *indexes, int indexes_len) { - PROTECT( - out__[0] = at_value_at_indexes<int64_t>(t,
indexes, indexes_len); - return 0; - ) - return 1; +int64_t at_int64_value_at_indexes(tensor t, int *indexes, int indexes_len) { + return at_value_at_indexes<int64_t>(t, indexes, indexes_len); } template<typename T> -int at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) { +void at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) { PROTECT( torch::Tensor tensor = *t; for (int i = 0; i < indexes_len; ++i) { tensor = tensor[indexes[i]]; } tensor.fill_(v); - return 0; ) - return 1; } -int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) { +void at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) { at_set_value_at_indexes<double>(t, indexes, indexes_len, v); } -int at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) { +void at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) { at_set_value_at_indexes<int64_t>(t, indexes, indexes_len, v); } -int at_fill_double(tensor t, double v) { - PROTECT( - t->fill_(v); - return 0; - ) - return 1; +void at_fill_double(tensor t, double v) { + PROTECT(t->fill_(v);) } -int at_fill_int64(tensor t, int64_t v) { - PROTECT( - t->fill_(v); - return 0; - ) - return 1; +void at_fill_int64(tensor t, int64_t v) { + PROTECT(t->fill_(v);) } -int at_print(tensor t) { +void at_print(tensor t) { PROTECT( torch::Tensor *tensor = (torch::Tensor*)t; cout << *tensor << endl; - return 0; ) - return 1; } -// char *at_to_string(tensor t, int line_size) { -// PROTECT( -// std::ostringstream oss; -// torch::print(oss, *t, line_size); -// return strdup(oss.str().c_str()); -// ) -// return nullptr; -// } - -int at_copy_(tensor dst, tensor src) { +char *at_to_string(tensor t, int line_size) { PROTECT( - dst->copy_(*src); - return 0; + std::ostringstream oss; + torch::print(oss, *t, line_size); + return strdup(oss.str().c_str()); ) - return 1; + return nullptr; } -int at_save(tensor t, char *filename) { +void at_copy_(tensor dst, tensor src) { PROTECT( - torch::save(*t, filename); - return 0; + dst->copy_(*src); ) - return 1; } -int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +void at_save(tensor t, char *filename) { + PROTECT(torch::save(*t, filename);) +} + +void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::serialize::OutputArchive archive; for (int i = 0; i < ntensors; ++i) archive.write(std::string(tensor_names[i]), *(tensors[i]), /* buffer=*/ false); archive.save_to(filename); - return 0; ) - return 1; } -int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::serialize::InputArchive archive; archive.load_from(std::string(filename)); @@ -335,24 +280,20 @@ int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *file // [read], no memory has to be freed.
for (int i = 0; i < ntensors; ++i) tensors[i] = new torch::Tensor(ts[i]); - return 0; ) - return 1; } -int at_load_callback(char *filename, void (*f)(char *, tensor)) { +void at_load_callback(char *filename, void (*f)(char *, tensor)) { PROTECT( auto module = torch::jit::load(filename); for (const auto &p : module.named_parameters()) { auto v = p.value; f((char*)p.name.c_str(), new torch::Tensor(v)); } - return 0; ) - return 1; } -int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::NoGradGuard no_grad; torch::serialize::InputArchive archive; @@ -366,30 +307,41 @@ int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *fil tensors[i]->copy_(tmp_tensor); } } - return 0; ) - return 1; } -int at_load(char *filename, tensor *out__) { +tensor at_load(char *filename) { PROTECT( torch::Tensor tensor; torch::load(tensor, filename); - out__[0] = new torch::Tensor(tensor); - return 0; + return new torch::Tensor(tensor); ) - return 1; + return nullptr; } -int at_free(tensor t) { - PROTECT( - delete(t); - return 0; - ) - return 1; +int at_get_num_interop_threads() { + PROTECT(return at::get_num_interop_threads();) + return -1; +} + +int at_get_num_threads() { + PROTECT(return at::get_num_threads();) + return -1; +} + +void at_set_num_interop_threads(int n_threads) { + PROTECT(at::set_num_interop_threads(n_threads);) +} + +void at_set_num_threads(int n_threads) { + PROTECT(at::set_num_threads(n_threads);) +} + +void at_free(tensor t) { + delete(t); } -int at_run_backward(tensor *tensors, +void at_run_backward(tensor *tensors, int ntensors, tensor *inputs, int ninputs, @@ -397,48 +349,45 @@ int at_run_backward(tensor *tensors, int keep_graph, int create_graph) { PROTECT( - torch::autograd::Engine engine; vector<torch::autograd::Edge> roots; for (int i = 0; i < ntensors; ++i) - roots.push_back(torch::autograd::impl::gradient_edge(torch::autograd::as_variable_ref(*tensors[i]))); + roots.push_back(torch::autograd::impl::gradient_edge(*tensors[i])); vector<torch::autograd::Edge> inputs_; for (int i = 0; i < ninputs; ++i) { if (!inputs[i]->requires_grad()) caml_invalid_argument("one of the input tensor does not use set_requires_grad"); - inputs_.push_back(torch::autograd::impl::gradient_edge(torch::autograd::as_variable_ref(*inputs[i]))); + inputs_.push_back(torch::autograd::impl::gradient_edge(*inputs[i])); } vector<torch::Tensor> grads; for (int i = 0; i < ntensors; ++i) grads.push_back(torch::ones_like(*tensors[i])); - auto vl = torch::autograd::Engine::get_default_engine().execute(roots, grads, keep_graph, create_graph, inputs_); + auto vl = torch::autograd::Engine::get_default_engine().execute(roots, grads, keep_graph, create_graph, false, inputs_); for (int i = 0; i < ninputs; ++i) { outputs[i] = static_cast<tensor>(new torch::autograd::Variable(vl[i])); } - return 0; ) - return 1; } -int ato_adam(optimizer *out__, double learning_rate, +optimizer ato_adam(double learning_rate, double beta1, double beta2, - double weight_decay) { + double weight_decay, + double eps) { PROTECT( auto options = torch::optim::AdamOptions(learning_rate) - .beta1(beta1) - .beta2(beta2) - .weight_decay(weight_decay); - out__[0] = new torch::optim::Adam(vector<torch::Tensor>(), options); - return 0; + .betas(std::tuple<double, double>(beta1, beta2)) + .weight_decay(weight_decay) + .eps(eps); + return new torch::optim::Adam(vector<torch::Tensor>(), options); ) - return 1; + return nullptr; } -int ato_rmsprop(optimizer *out__, double learning_rate, +optimizer ato_rmsprop(double
learning_rate, double alpha, double eps, double weight_decay, @@ -452,297 +401,519 @@ int ato_rmsprop(optimizer *out__, double learning_rate, .weight_decay(weight_decay) .momentum(momentum) .centered(centered != 0); - out__[0] = new torch::optim::RMSprop(vector<torch::Tensor>(), options); - return 0; - ) - return 1; + return new torch::optim::RMSprop(vector<torch::Tensor>(), options); + ) + return nullptr; } -int ato_sgd(optimizer *out__, double learning_rate, +optimizer ato_sgd(double learning_rate, double momentum, double dampening, double weight_decay, int nesterov) { PROTECT( - auto options = + auto options = torch::optim::SGDOptions(learning_rate) .momentum(momentum) .dampening(dampening) .weight_decay(weight_decay) .nesterov(nesterov); - out__[0] = new torch::optim::SGD(vector<torch::Tensor>(), options); - return 0; + return new torch::optim::SGD(vector<torch::Tensor>(), options); ) - return 1; + return nullptr; } -int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { +void ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { PROTECT( - t->add_parameters(of_carray_tensor(tensors, ntensors)); - return 0; + for (int i = 0; i < ntensors; ++i) + t->param_groups()[0].params().push_back(*(tensors[i])); ) - return 1; } -int ato_set_learning_rate(optimizer t, double learning_rate) { +void ato_set_learning_rate(optimizer t, double learning_rate) { PROTECT( - if (auto adam = dynamic_cast<torch::optim::Adam*>(t)) - adam->options.learning_rate(learning_rate); - else if (auto rms = dynamic_cast<torch::optim::RMSprop*>(t)) - rms->options.learning_rate(learning_rate); - else if (auto sgd = dynamic_cast<torch::optim::SGD*>(t)) - sgd->options.learning_rate(learning_rate); + torch::optim::OptimizerOptions* d = &(t->defaults()); + if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { + adam->lr(learning_rate); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto adam2 = dynamic_cast<torch::optim::AdamOptions*>(d)) { + adam2->lr(learning_rate); + } + else caml_invalid_argument("unexpected param group type"); + } + } + else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + rms->lr(learning_rate); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto rms2 = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + rms2->lr(learning_rate); + } + else caml_invalid_argument("unexpected param group type"); + } + } + else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd->lr(learning_rate); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto sgd2 = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd2->lr(learning_rate); + } + else caml_invalid_argument("unexpected param group type"); + } + } else caml_invalid_argument("unexpected optimizer"); - return 0; ) - return 1; } -int ato_set_momentum(optimizer t, double momentum) { +void ato_set_momentum(optimizer t, double momentum) { PROTECT( - if (auto adam = dynamic_cast<torch::optim::Adam*>(t)) - adam->options.beta1(momentum); - else if (auto rms = dynamic_cast<torch::optim::RMSprop*>(t)) - rms->options.momentum(momentum); - else if (auto sgd = dynamic_cast<torch::optim::SGD*>(t)) - sgd->options.momentum(momentum); + torch::optim::OptimizerOptions* d = &(t->defaults()); + if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { + auto betas = adam->betas(); + adam->betas(std::tuple<double, double>(momentum, get<1>(betas))); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto adam2 = dynamic_cast<torch::optim::AdamOptions*>(d)) { + adam2->betas(std::tuple<double, double>(momentum, get<1>(betas))); + } + else caml_invalid_argument("unexpected param group type"); + } + } + else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + for (auto &param_group: t->param_groups()) {
torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto rms2 = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + rms2->momentum(momentum); + } + else caml_invalid_argument("unexpected param group type"); + } + } + else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd->momentum(momentum); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto sgd2 = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd2->momentum(momentum); + } + else caml_invalid_argument("unexpected param group type"); + } + } else - caml_invalid_argument("unexpected optimizer"); - return 0; + caml_invalid_argument("unexpected optimizer"); ) - return 1; } -int ato_zero_grad(optimizer t) { + +void ato_zero_grad(optimizer t) { + PROTECT(t->zero_grad();) +} + +void ato_step(optimizer t) { + PROTECT(t->step();) +} + +void ato_free(optimizer t) { + delete(t); +} + +scalar ats_int(int64_t v) { + PROTECT(return new torch::Scalar(v);) + return nullptr; +} + +scalar ats_float(double v) { + PROTECT(return new torch::Scalar(v);) + return nullptr; +} + +int64_t ats_to_int(scalar s) { + PROTECT(return s->toLong();) + return -1; +} + +double ats_to_float(scalar s) { + PROTECT(return s->toDouble();) + return 0.; +} + +char *ats_to_string(scalar s) { + PROTECT( + using namespace at; + std::ostringstream oss; + oss << (*s); + return strdup(oss.str().c_str()); + ) + return nullptr; +} + +void ats_free(scalar s) { + delete(s); +} + +int atc_cuda_device_count() { + PROTECT(return torch::cuda::device_count();) + return -1; +} + +int atc_cuda_is_available() { + PROTECT(return torch::cuda::is_available();) + return -1; +} + +int atc_cudnn_is_available() { + PROTECT(return torch::cuda::cudnn_is_available();) + return -1; +} + +void atc_set_benchmark_cudnn(int b) { + at::globalContext().setBenchmarkCuDNN(b); +} + +module atm_load(char *filename) { + PROTECT( + return new torch::jit::script::Module(torch::jit::load(filename)); + ) + return nullptr; +} + +module atm_load_str(char *data, size_t sz) { PROTECT( - t->zero_grad(); - return 0; + std::istringstream stream(std::string(data, sz)); + return new torch::jit::script::Module(torch::jit::load(stream)); ) - return 1; + return nullptr; } -int ato_step(optimizer t) { +tensor atm_forward(module m, tensor *tensors, int ntensors) { PROTECT( - t->step(); - return 0; + std::vector<torch::jit::IValue> inputs; + for (int i = 0; i < ntensors; ++i) + inputs.push_back(*(tensors[i])); + torch::jit::IValue output = m->forward(inputs); + if (!output.isTensor()) + caml_failwith("forward did not return a tensor"); + return new torch::Tensor(output.toTensor()); ) - return 1; + return nullptr; } -int ato_free(optimizer t) { +ivalue atm_forward_(module m, + ivalue *ivalues, + int nivalues) { PROTECT( - delete(t); - return 0; + std::vector<torch::jit::IValue> inputs; + for (int i = 0; i < nivalues; ++i) + inputs.push_back(*(ivalues[i])); + torch::jit::IValue output = m->forward(inputs); + return new torch::jit::IValue(output); ) - return 1; + return nullptr; +} + +void atm_free(module m) { + delete(m); } -int ats_int(scalar *out__, int64_t v) { +void atm_to(module m, int device, int dtype, bool non_blocking) { PROTECT( - out__[0] = new torch::Scalar(v); - return 0; + m->to(device_of_int(device), at::ScalarType(dtype), non_blocking); ) - return 1; } -int ats_float(scalar *out__, double v) { +ivalue ati_tensor(tensor t) { PROTECT( - out__[0] = new torch::Scalar(v); - return 0; + return new torch::jit::IValue(*t); ) - return 1; + return nullptr; } -int ats_free(scalar s) { +ivalue ati_int(int64_t i) { PROTECT( - delete(s); - return 0; + return new
torch::jit::IValue(i); ) - return 1; + return nullptr; } -int atc_cuda_device_count(int *out__) { +ivalue ati_double(double d) { PROTECT( - out__[0] = torch::cuda::device_count(); - return 0; + return new torch::jit::IValue(d); ) - return 1; + return nullptr; } -int atc_cuda_is_available(int *out__) { +ivalue ati_bool(int i) { PROTECT( - out__[0] = torch::cuda::is_available(); - return 0; + return new torch::jit::IValue((bool)i); ) - return 1; + return nullptr; } -int atc_cudnn_is_available(int *out__) { +ivalue ati_string(char *s) { PROTECT( - out__[0] = torch::cuda::cudnn_is_available(); - return 0; + string str(s); + return new torch::jit::IValue(str); ) - return 1; + return nullptr; } -int atc_set_benchmark_cudnn(int b) { +ivalue ati_none() { PROTECT( - at::globalContext().setBenchmarkCuDNN(b); - return 0; + return new torch::jit::IValue(); ) - return 1; + return nullptr; } -int atm_load(char *filename, module *out__) { +ivalue ati_tuple(ivalue *is, int nvalues) { PROTECT( - out__[0] = new torch::jit::script::Module(torch::jit::load(filename)); - return 0; + vector vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); + return new torch::jit::IValue(torch::ivalue::Tuple::create(vec)); ) - return 1; + return nullptr; } -int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { +ivalue ati_generic_list(ivalue *is, int nvalues) { PROTECT( - std::vector inputs; - for (int i = 0; i < ntensors; ++i) - inputs.push_back(*(tensors[i])); - torch::jit::IValue output = m->forward(inputs); - if (!output.isTensor()) { - myerr = strdup("forward did not return a tensor"); - return 1; - } - out__[0] = new torch::Tensor(output.toTensor()); - return 0; + c10::List vec(c10::AnyType::get()); + for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); + return new torch::jit::IValue(c10::List(vec)); ) - return 1; + return nullptr; } -int atm_forward_(ivalue *out__, module m, - ivalue *ivalues, - int nivalues) { +ivalue ati_generic_dict(ivalue *is, int nvalues) { + c10::Dict dict(c10::AnyType::get(), c10::AnyType::get()); PROTECT( - std::vector inputs; - for (int i = 0; i < nivalues; ++i) - inputs.push_back(*(ivalues[i])); - torch::jit::IValue output = m->forward(inputs); - out__[0] = new torch::jit::IValue(output); - return 0; + for (int i = 0; i < nvalues; ++i) dict.insert(*(is[2*i]), *(is[2*i+1])); + return new torch::jit::IValue(dict); ) - return 1; + return nullptr; } -int atm_free(module m) { +ivalue ati_int_list(int64_t *is, int nvalues) { PROTECT( - delete(m); - return 0; + c10::List vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(is[i]); + return new torch::jit::IValue(vec); ) - return 1; + return nullptr; } -int ati_tensor(ivalue *out__, tensor t) { +ivalue ati_double_list(double *is, int nvalues) { PROTECT( - out__[0] = new torch::jit::IValue(*t); - return 0; + c10::List vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(is[i]); + return new torch::jit::IValue(vec); ) - return 1; + return nullptr; } -int ati_int(ivalue *out__, int64_t i) { +ivalue ati_bool_list(char *is, int nvalues) { PROTECT( - out__[0] = new torch::jit::IValue(i); - return 0; + c10::List vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(is[i] != 0); + return new torch::jit::IValue(vec); ) - return 1; + return nullptr; } -int ati_double(ivalue *out__, double d) { +ivalue ati_string_list(char **is, int nvalues) { PROTECT( - out__[0] = new torch::jit::IValue(d); - return 0; + c10::List vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(string(is[i])); + return new 
torch::jit::IValue(vec); ) - return 1; + return nullptr; } -int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { +ivalue ati_tensor_list(tensor *is, int nvalues) { PROTECT( - vector vec; + c10::List vec; for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); - out__[0] = new torch::jit::IValue(torch::ivalue::Tuple::create(vec)); - return 0; + return new torch::jit::IValue(vec); ) - return 1; + return nullptr; } -int ati_tag(int *out__, ivalue i) { +int ati_tag(ivalue i) { PROTECT( - if (i->isTensor()) out__[0] = 0; - else if (i->isInt()) out__[0] = 1; - else if (i->isDouble()) out__[0] = 2; - else if (i->isTuple()) out__[0] = 3; - // myerr = strdup(("unsupported tag" + i->tagKind()).c_str()); - return 0; + if (i->isNone()) return 0; + else if (i->isTensor()) return 1; + else if (i->isDouble()) return 2; + else if (i->isInt()) return 3; + else if (i->isBool()) return 4; + else if (i->isTuple()) return 5; + else if (i->isIntList()) return 6; + else if (i->isDoubleList()) return 7; + else if (i->isBoolList()) return 8; + else if (i->isString()) return 9; + else if (i->isTensorList()) return 10; + else if (i->isList()) return 12; + else if (i->isGenericDict()) return 13; + caml_failwith(("unsupported tag" + i->tagKind()).c_str()); + return -1; ) - myerr = strdup(("unsupported tag" + i->tagKind()).c_str()); - return 1; + return -1; } -int ati_to_int(int64_t *out__, ivalue i) { +int64_t ati_to_int(ivalue i) { PROTECT( - out__[0] = i->toInt(); - return 0; + return i->toInt(); ) - return 1; + return -1; } -int ati_to_double(double *out__, ivalue i) { +double ati_to_double(ivalue i) { PROTECT( - out__[0] = i->toDouble(); - return 0; + return i->toDouble(); ) - return 1; + return 0.; } -int ati_to_tensor(tensor *out__, ivalue i) { +int ati_to_bool(ivalue i) { PROTECT( - out__[0] = new torch::Tensor(i->toTensor()); - return 0; + return i->toBool(); ) - return 1; + return -1; } +char *ati_to_string(ivalue i) { + PROTECT( + auto str = i->toStringRef(); + return strdup(str.c_str()); + ) + return nullptr; +} + +tensor ati_to_tensor(ivalue i) { + PROTECT( + return new torch::Tensor(i->toTensor()); + ) + return nullptr; +} -int ati_tuple_length(int *out__, ivalue i) { +int ati_length(ivalue i) { PROTECT( - out__[0] = i->toTuple()->elements().size(); - return 0; + if (i->isTuple()) return i->toTuple()->elements().size(); + else if (i->isIntList()) return i->toIntList().size(); + else if (i->isDoubleList()) return i->toDoubleList().size(); + else if (i->isBoolList()) return i->toBoolList().size(); + else if (i->isString()) return i->toStringRef().size(); + else if (i->isTensorList()) return i->toTensorList().size(); + else if (i->isList()) return i->toList().size(); + else if (i->isGenericDict()) return i->toGenericDict().size(); + caml_invalid_argument("unsupported tag for this length"); + return -1; ) - return 1; + return -1; } -int ati_to_tuple(ivalue i, +int ati_tuple_length(ivalue i) { + PROTECT( + return i->toTuple()->elements().size(); + ) + return -1; +} + +void ati_to_tuple(ivalue i, ivalue *outputs, int noutputs) { PROTECT( auto vec = i->toTuple()->elements(); if (vec.size() != noutputs) { - myerr = strdup("unexpected tuple size"); - return 1; + caml_failwith("unexpected tuple size"); } for (int i = 0; i < noutputs; ++i) outputs[i] = new torch::jit::IValue(vec[i]); - return 0; ) - return 1; } +void ati_to_generic_list(ivalue i, + ivalue *outputs, + int noutputs) { + PROTECT( + auto vec = i->toList(); + if (vec.size() != noutputs) { + caml_invalid_argument("unexpected list size"); + } + for 
(int i = 0; i < noutputs; ++i) + outputs[i] = new torch::jit::IValue(vec[i]); + ) +} -int ati_free(ivalue i) { +void ati_to_generic_dict(ivalue i, + ivalue *outputs, + int noutputs) { PROTECT( - delete(i); - return 0; + auto dict = i->toGenericDict(); + if (dict.size() != noutputs) { + caml_invalid_argument("unexpected dict size"); + } + int k = 0; + for (auto it = dict.begin(); it != dict.end(); ++it) { + outputs[k++] = new torch::jit::IValue(it->key()); + outputs[k++] = new torch::jit::IValue(it->value()); + } ) - return 1; +} + +void ati_to_int_list(ivalue i, + int64_t *outputs, + int noutputs) { + PROTECT( + auto vec = i->toIntList(); + if (vec.size() != noutputs) { + caml_invalid_argument("unexpected list size"); + } + for (int i = 0; i < noutputs; ++i) + outputs[i] = vec[i]; + ) +} + +void ati_to_double_list(ivalue i, + double *outputs, + int noutputs) { + PROTECT( + auto vec = i->toDoubleList(); + if (vec.size() != noutputs) { + caml_invalid_argument("unexpected list size"); + } + for (int i = 0; i < noutputs; ++i) + outputs[i] = vec[i]; + ) +} + +void ati_to_bool_list(ivalue i, + char *outputs, + int noutputs) { + PROTECT( + auto vec = i->toBoolList(); + if (vec.size() != noutputs) { + caml_invalid_argument("unexpected list size"); + } + for (int i = 0; i < noutputs; ++i) + outputs[i] = vec[i]; + ) +} + +void ati_to_tensor_list(ivalue i, + tensor *outputs, + int noutputs) { + PROTECT( + auto vec = i->toTensorList(); + if (vec.size() != noutputs) { + caml_invalid_argument("unexpected tuple size"); + } + for (int i = 0; i < noutputs; ++i) + outputs[i] = new torch::Tensor(vec[i]); + ) +} + +void ati_free(ivalue i) { + delete(i); } #include "torch_api_generated.cpp.h" diff --git a/deps/c_wrapper/torch_api.h b/deps/c_wrapper/torch_api.h index 6b74e07a..0b60ac09 100644 --- a/deps/c_wrapper/torch_api.h +++ b/deps/c_wrapper/torch_api.h @@ -9,15 +9,11 @@ typedef torch::Scalar *scalar; typedef torch::optim::Optimizer *optimizer; typedef torch::jit::script::Module *module; typedef torch::jit::IValue *ivalue; -typedef torch::NoGradGuard *ngg; -char* myerr = ""; #define PROTECT(x) \ try { \ x \ } catch (const exception& e) { \ - myerr = strdup(e.what()); \ - /* jl_error(strdup(e.what())); */ \ - /* throw(e.what()); */ \ + caml_failwith(strdup(e.what())); \ } #else typedef void *tensor; @@ -25,59 +21,63 @@ typedef void *optimizer; typedef void *scalar; typedef void *module; typedef void *ivalue; -typedef void *ngg; #endif -int get_last_error(char *); -int flush_error(); - -int at_manual_seed(int64_t); -int at_new_tensor(tensor *); -int at_empty_cache(); -int at_no_grad(int flag); -int at_sync(); -int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev); -int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); -int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); -int at_float_vec(double *values, int value_len, int type); -int at_int_vec(int64_t *values, int value_len, int type); - -int at_defined(int *i, tensor); -int at_dim(int *i, tensor); -int at_shape(tensor, int *); -int at_scalar_type(int *i, tensor); - -int at_backward(tensor, int, int); -int at_requires_grad(int *i, tensor); +void at_manual_seed(int64_t); +tensor at_new_tensor(); +tensor at_tensor_of_data(void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); +void at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); +tensor at_float_vec(double *values, int 
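Note on the convention change above: the hand-written entry points now return their results directly and report C++ exceptions to OCaml through caml_failwith inside PROTECT, instead of returning an int status and writing through out-parameters. A minimal caller sketch (illustrative only, not part of the patch; the file name and index values are made up):

    /* With the direct-return convention, a nullptr (or the PROTECT fallback
       value) is only observed if caml_failwith has already raised on the
       OCaml side, so no status code needs to be checked. */
    tensor t = at_load("model.pt");   /* previously: int at_load(char *, tensor *) */
    int idx[1] = {0};
    double first = at_double_value_at_indexes(t, idx, 1);
    at_free(t);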
diff --git a/deps/c_wrapper/torch_api.h b/deps/c_wrapper/torch_api.h
index 6b74e07a..0b60ac09 100644
--- a/deps/c_wrapper/torch_api.h
+++ b/deps/c_wrapper/torch_api.h
@@ -9,15 +9,11 @@ typedef torch::Scalar *scalar;
 typedef torch::optim::Optimizer *optimizer;
 typedef torch::jit::script::Module *module;
 typedef torch::jit::IValue *ivalue;
-typedef torch::NoGradGuard *ngg;
-char* myerr = "";
 #define PROTECT(x) \
   try { \
     x \
   } catch (const exception& e) { \
-    myerr = strdup(e.what()); \
-    /* jl_error(strdup(e.what())); */ \
-    /* throw(e.what()); */ \
+    caml_failwith(strdup(e.what())); \
   }
 #else
 typedef void *tensor;
@@ -25,59 +21,63 @@ typedef void *optimizer;
 typedef void *scalar;
 typedef void *module;
 typedef void *ivalue;
-typedef void *ngg;
 #endif
 
-int get_last_error(char *);
-int flush_error();
-
-int at_manual_seed(int64_t);
-int at_new_tensor(tensor *);
-int at_empty_cache();
-int at_no_grad(int flag);
-int at_sync();
-int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev);
-int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type);
-int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes);
-int at_float_vec(double *values, int value_len, int type);
-int at_int_vec(int64_t *values, int value_len, int type);
-
-int at_defined(int *i, tensor);
-int at_dim(int *i, tensor);
-int at_shape(tensor, int *);
-int at_scalar_type(int *i, tensor);
-
-int at_backward(tensor, int, int);
-int at_requires_grad(int *i, tensor);
+void at_manual_seed(int64_t);
+tensor at_new_tensor();
+tensor at_tensor_of_data(void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type);
+void at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes);
+tensor at_float_vec(double *values, int value_len, int type);
+tensor at_int_vec(int64_t *values, int value_len, int type);
+
+int at_defined(tensor);
+int at_is_sparse(tensor);
+int at_device(tensor);
+int at_dim(tensor);
+void at_shape(tensor, int *);
+void at_stride(tensor, int *);
+int at_scalar_type(tensor);
+
+void at_autocast_clear_cache();
+int at_autocast_decrement_nesting();
+int at_autocast_increment_nesting();
+int at_autocast_is_enabled();
+int at_autocast_set_enabled(int b);
+
+void at_backward(tensor, int, int);
+int at_requires_grad(tensor);
 int at_grad_set_enabled(int);
 
-int at_get(tensor *, tensor, int index);
-int at_fill_double(tensor, double);
-int at_fill_int64(tensor, int64_t);
+tensor at_get(tensor, int index);
+void at_fill_double(tensor, double);
+void at_fill_int64(tensor, int64_t);
 
-int at_double_value_at_indexes(double *i, tensor, int *indexes, int indexes_len);
-int at_int64_value_at_indexes(double *i, tensor, int *indexes, int indexes_len);
-int at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v);
-int at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v);
+double at_double_value_at_indexes(tensor, int *indexes, int indexes_len);
+int64_t at_int64_value_at_indexes(tensor, int *indexes, int indexes_len);
+void at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v);
+void at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v);
 
-int at_copy_(tensor dst, tensor src);
+void at_copy_(tensor dst, tensor src);
 
-int at_print(tensor);
-// char *at_to_string(tensor, int line_size);
-int at_save(tensor, char *filename);
+void at_print(tensor);
+char *at_to_string(tensor, int line_size);
+void at_save(tensor, char *filename);
 tensor at_load(char *filename);
 
-int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename);
+int at_get_num_threads();
+void at_set_num_threads(int n_threads);
+
+void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename);
 /* [at_load_multi] takes as input an array of nullptr for [tensors]. */
-int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename);
+void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename);
 /* [at_load_multi_] takes as input an array of allocation [tensors]. */
-int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename);
+void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename);
 
-int at_load_callback(char *filename, void (*f)(char *, tensor));
+void at_load_callback(char *filename, void (*f)(char *, tensor));
 
-int at_free(tensor);
+void at_free(tensor);
 
-int at_run_backward(tensor *tensors,
+void at_run_backward(tensor *tensors,
                     int ntensors,
                     tensor *inputs,
                     int ninputs,
@@ -85,58 +85,79 @@ int at_run_backward(tensor *tensors,
                     int keep_graph,
                     int create_graph);
 
-int ato_adam(optimizer *, double learning_rate,
+optimizer ato_adam(double learning_rate,
                    double beta1,
                    double beta2,
-                   double weight_decay);
-int ato_rmsprop(optimizer *, double learning_rate,
+                   double weight_decay,
+                   double eps);
+optimizer ato_rmsprop(double learning_rate,
                       double alpha,
                       double eps,
                       double weight_decay,
                       double momentum,
                       int centered);
-int ato_sgd(optimizer *, double learning_rate,
+optimizer ato_sgd(double learning_rate,
                   double momentum,
                   double dampening,
                   double weight_decay,
                   int nesterov);
-int ato_add_parameters(optimizer, tensor *, int ntensors);
-int ato_set_learning_rate(optimizer, double learning_rate);
-int ato_set_momentum(optimizer, double momentum);
-int ato_zero_grad(optimizer);
-int ato_step(optimizer);
-int ato_free(optimizer);
-
-int ats_int(scalar *, int64_t);
-int ats_float(scalar *, double);
-int ats_free(scalar);
-
-int atc_cuda_device_count(int *);
-int atc_cuda_is_available(int *);
-int atc_cudnn_is_available(int *);
-int atc_set_benchmark_cudnn(int b);
-
-int atm_load(char *, module *);
-int atm_forward(tensor *, module, tensor *tensors, int ntensors);
-int atm_forward_(ivalue *, module,
+void ato_add_parameters(optimizer, tensor *, int ntensors);
+void ato_set_learning_rate(optimizer, double learning_rate);
+void ato_set_momentum(optimizer, double momentum);
+void ato_zero_grad(optimizer);
+void ato_step(optimizer);
+void ato_free(optimizer);
+
+scalar ats_int(int64_t);
+scalar ats_float(double);
+void ats_free(scalar);
+
+int atc_cuda_device_count();
+int atc_cuda_is_available();
+int atc_cudnn_is_available();
+void atc_set_benchmark_cudnn(int b);
+
+module atm_load(char *);
+tensor atm_forward(module, tensor *tensors, int ntensors);
+ivalue atm_forward_(module,
                  ivalue *ivalues,
                  int nivalues);
-int atm_free(module);
-
-int ati_tensor(ivalue *, tensor);
-int ati_int(ivalue *, int64_t);
-int ati_double(ivalue *, double);
-int ati_tuple(ivalue *, ivalue *, int);
-
-int ati_to_tensor(tensor *, ivalue);
-int ati_to_int(int64_t *, ivalue);
-int ati_to_double(double *, ivalue);
-int ati_tuple_length(int *, ivalue);
-int ati_to_tuple(ivalue, ivalue *, int);
-
-int ati_tag(int *, ivalue);
-
-int ati_free(ivalue);
+void atm_free(module);
+
+ivalue ati_none();
+ivalue ati_tensor(tensor);
+ivalue ati_bool(int);
+ivalue ati_int(int64_t);
+ivalue ati_double(double);
+ivalue ati_tuple(ivalue *, int);
+ivalue ati_string(char *);
+ivalue ati_generic_list(ivalue *, int);
+ivalue ati_generic_dict(ivalue *, int);
+ivalue ati_int_list(int64_t *, int);
+ivalue ati_double_list(double *, int);
+ivalue ati_bool_list(char *, int);
+ivalue ati_string_list(char **, int);
+ivalue ati_tensor_list(tensor *, int);
+
+tensor ati_to_tensor(ivalue);
+int64_t ati_to_int(ivalue);
+double ati_to_double(ivalue);
+char *ati_to_string(ivalue);
+int ati_to_bool(ivalue);
+int ati_length(ivalue);
+int ati_tuple_length(ivalue);
+void ati_to_tuple(ivalue, ivalue *, int);
+void ati_to_generic_list(ivalue, ivalue *, int);
+void ati_to_generic_dict(ivalue, ivalue *, int);
+void ati_to_int_list(ivalue, int64_t *, int);
+void ati_to_double_list(ivalue, double *, int);
+void ati_to_bool_list(ivalue, char *, int);
+void ati_to_tensor_list(ivalue, tensor *, int);
+
+int ati_tag(ivalue);
+
+void ati_free(ivalue);
 
 #include "torch_api_generated.h"

From f82525a21bb3c0192fc02e07a2a0a7ba2a23ad6c Mon Sep 17 00:00:00 2001
From: Jesper Stemann Andersen
Date: Fri, 17 Nov 2023 01:11:35 +0100
Subject: [PATCH 06/12] deps/c_wrapper: Renamed doeye_caml to torch_c_api

---
 deps/c_wrapper/CMakeLists.txt | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/deps/c_wrapper/CMakeLists.txt b/deps/c_wrapper/CMakeLists.txt
index 4576d442..b32dedec 100644
--- a/deps/c_wrapper/CMakeLists.txt
+++ b/deps/c_wrapper/CMakeLists.txt
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
-project(doeye_caml)
+project(torch_c_api)
 
 set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR};${CMAKE_MODULE_PATH}")
 
@@ -7,7 +7,7 @@ find_package(Torch REQUIRED)
 
 include_directories(SYSTEM path)
 
-add_library(doeye_caml "SHARED" torch_api.cpp)
-target_link_libraries(doeye_caml "${TORCH_LIBRARIES}")
+add_library(torch_c_api "SHARED" torch_api.cpp)
+target_link_libraries(torch_c_api "${TORCH_LIBRARIES}")
 
-set_property(TARGET doeye_caml PROPERTY CXX_STANDARD 14)
+set_property(TARGET torch_c_api PROPERTY CXX_STANDARD 14)
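Note on the generated bindings updated in the next patch: unlike the hand-written at*/ato*/ati* entry points above, the regenerated atg_* functions keep the out-array convention, writing result tensors into a caller-provided array and returning 0 on success or 1 after PROTECT has caught (and reported) a C++ exception. A sketch of the calling pattern (illustrative only, not part of the patch):

    /* atg_* fills out__ with its result tensor(s); the int return value is
       the PROTECT status (0 = ok, 1 = a C++ exception was caught). */
    tensor out__[1];
    if (atg___and__(out__, self, other) == 0) {
      tensor result = out__[0];
      /* ... use result ... */
      at_free(result);
    }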
From 075ecf725e3ed18126826f788e0f04c41ae7e5e7 Mon Sep 17 00:00:00 2001
From: Jesper Stemann Andersen
Date: Fri, 11 Aug 2023 16:54:46 +0200
Subject: [PATCH 07/12] Updated generated wrapper for Torch v1.10

---
 deps/c_wrapper/torch_api_generated.cpp.h | 12279 +++++++++++++++++----
 deps/c_wrapper/torch_api_generated.h     |  1391 ++-
 2 files changed, 11171 insertions(+), 2499 deletions(-)

diff --git a/deps/c_wrapper/torch_api_generated.cpp.h b/deps/c_wrapper/torch_api_generated.cpp.h
index 36a44c4b..e4a649b4 100644
--- a/deps/c_wrapper/torch_api_generated.cpp.h
+++ b/deps/c_wrapper/torch_api_generated.cpp.h
@@ -1,903 +1,914 @@
 // THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
 
-int atg_abs(tensor *out__, tensor self) {
+int atg___and__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::abs(*self);
+    auto outputs__ = torch::__and__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_abs_(tensor *out__, tensor self) {
+int atg___and__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::abs_(*self);
+    auto outputs__ = torch::__and__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_abs_out(tensor *out__, tensor out, tensor self) {
+int atg___iand__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::abs_out(*out, *self);
+    auto outputs__ = self->__iand__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_acos(tensor *out__, tensor self) {
+int atg___iand__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::acos(*self);
+    auto outputs__ = self->__iand__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_acos_(tensor *out__, tensor self) {
+int atg___ilshift__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::acos_(*self);
+    auto outputs__ = self->__ilshift__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_acos_out(tensor *out__, tensor out, tensor self) {
+int atg___ilshift__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::acos_out(*out, *self);
+    auto outputs__ = self->__ilshift__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___ior__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = self->__ior__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___ior__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = self->__ior__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___irshift__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = self->__irshift__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___irshift__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = self->__irshift__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self) {
+int atg___ixor__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool3d_backward(*grad_output, *self);
+    auto outputs__ = self->__ixor__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self) {
+int atg___ixor__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool3d_backward_out(*grad_input, *grad_output, *self);
+    auto outputs__ = self->__ixor__(*other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___lshift__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_avg_pool3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = torch::__lshift__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___lshift__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::__lshift__(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___or__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::__or__(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) {
+int atg___or__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool2d_backward(*grad_output, *self, *indices);
+    auto outputs__ = torch::__or__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) {
+int atg___rshift__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool2d_backward_out(*grad_input, *grad_output, *self, *indices);
+    auto outputs__ = torch::__rshift__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___rshift__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool2d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::__rshift__(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg___xor__(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::__xor__(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) {
+int atg___xor__tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool3d_backward(*grad_output, *self, *indices);
+    auto outputs__ = torch::__xor__(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) {
+int atg__adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool3d_backward_out(*grad_input, *grad_output, *self, *indices);
+    auto outputs__ = torch::_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg__adaptive_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self) {
   PROTECT(
-    auto outputs__ = torch::adaptive_max_pool3d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::_adaptive_avg_pool2d_backward(*grad_output, *self);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_add(tensor *out__, tensor self, tensor other) {
+int atg__adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
-    auto outputs__ = torch::add(*self, *other);
+    auto outputs__ = torch::_adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_add1(tensor *out__, tensor self, scalar other) {
+int atg__adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self) {
   PROTECT(
-    auto outputs__ = torch::add(*self, *other);
+    auto outputs__ = torch::_adaptive_avg_pool3d_backward(*grad_output, *self);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_add_(tensor *out__, tensor self, tensor other) {
+int atg__add_batch_dim(tensor *out__, tensor self, int64_t batch_dim, int64_t level) {
   PROTECT(
-    auto outputs__ = self->add_(*other);
+    auto outputs__ = torch::_add_batch_dim(*self, batch_dim, level);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_add_1(tensor *out__, tensor self, scalar other) {
+int atg__add_relu(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = self->add_(*other);
+    auto outputs__ = torch::_add_relu(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) {
+int atg__add_relu_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::add_out(*out, *self, *other);
+    auto outputs__ = torch::_add_relu_(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) {
+int atg__add_relu_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::addbmm(*self, *batch1, *batch2);
+    auto outputs__ = torch::_add_relu_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) {
+int atg__add_relu_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = self->addbmm_(*batch1, *batch2);
+    auto outputs__ = torch::_add_relu(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) {
+int atg__add_relu_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::addbmm_out(*out, *self, *batch1, *batch2);
+    auto outputs__ = torch::_add_relu_(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) {
+int atg__aminmax(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::addcdiv(*self, *tensor1, *tensor2);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_aminmax(*self);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) {
+int atg__aminmax_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
   PROTECT(
-    auto outputs__ = self->addcdiv_(*tensor1, *tensor2);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_aminmax(*self, dim, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) {
+int atg__amp_update_scale_(tensor *out__, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
   PROTECT(
-    auto outputs__ = torch::addcdiv_out(*out, *self, *tensor1, *tensor2);
+    auto outputs__ = torch::_amp_update_scale_(*self, *growth_tracker, *found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) {
+int atg__baddbmm_mkl_(tensor *out__, tensor self, tensor batch1, tensor batch2) {
   PROTECT(
-    auto outputs__ = torch::addcmul(*self, *tensor1, *tensor2);
+    auto outputs__ = torch::_baddbmm_mkl_(*self, *batch1, *batch2);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) {
+int atg__cast_byte(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = self->addcmul_(*tensor1, *tensor2);
+    auto outputs__ = torch::_cast_Byte(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) {
+int atg__cast_char(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addcmul_out(*out, *self, *tensor1, *tensor2);
+    auto outputs__ = torch::_cast_Char(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
+int atg__cast_double(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addmm(*self, *mat1, *mat2);
+    auto outputs__ = torch::_cast_Double(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) {
+int atg__cast_float(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = self->addmm_(*mat1, *mat2);
+    auto outputs__ = torch::_cast_Float(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
+int atg__cast_half(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addmm_out(*out, *self, *mat1, *mat2);
+    auto outputs__ = torch::_cast_Half(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) {
+int atg__cast_int(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addmv(*self, *mat, *vec);
+    auto outputs__ = torch::_cast_Int(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) {
+int atg__cast_long(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addmv_(*self, *mat, *vec);
+    auto outputs__ = torch::_cast_Long(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec) {
+int atg__cast_short(tensor *out__, tensor self, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::addmv_out(*out, *self, *mat, *vec);
+    auto outputs__ = torch::_cast_Short(*self, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) {
+int atg__cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::addr(*self, *vec1, *vec2);
+    auto outputs__ = torch::_cat(of_carray_tensor(tensors_data, tensors_len), dim);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) {
+int atg__cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->addr_(*vec1, *vec2);
+    auto outputs__ = torch::_cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) {
+int atg__cdist_backward(tensor *out__, tensor grad, tensor x1, tensor x2, double p, tensor cdist) {
   PROTECT(
-    auto outputs__ = torch::addr_out(*out, *self, *vec1, *vec2);
+    auto outputs__ = torch::_cdist_backward(*grad, *x1, *x2, p, *cdist);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) {
+int atg__cholesky_solve_helper(tensor *out__, tensor self, tensor A, int upper) {
   PROTECT(
-    auto outputs__ = torch::affine_grid_generator(*theta, torch::IntArrayRef(size_data, size_len), (bool)align_corners);
+    auto outputs__ = torch::_cholesky_solve_helper(*self, *A, (bool)upper);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size_data, int size_len, int align_corners) {
+int atg__coalesce(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::affine_grid_generator_backward(*grad, torch::IntArrayRef(size_data, size_len), (bool)align_corners);
+    auto outputs__ = torch::_coalesce(*self);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_alias(tensor *out__, tensor self) {
+int atg__coalesced_(tensor *out__, tensor self, int coalesced) {
   PROTECT(
-    auto outputs__ = torch::alias(*self);
+    auto outputs__ = self->_coalesced_((bool)coalesced);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_align_as(tensor *out__, tensor self, tensor other) {
+int atg__compute_linear_combination(tensor *out__, tensor input, tensor coefficients) {
   PROTECT(
-    auto outputs__ = self->align_as(*other);
+    auto outputs__ = torch::_compute_linear_combination(*input, *coefficients);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) {
+int atg__compute_linear_combination_out(tensor *out__, tensor out, tensor input, tensor coefficients) {
   PROTECT(
-    auto outputs__ = torch::align_tensors(of_carray_tensor(tensors_data, tensors_len));
-    int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
-    for (int i = 0; i < sz; ++i)
-      out__[i] = new torch::Tensor(outputs__[i]);
-    out__[sz] = nullptr;
-    // return out__;
+    auto outputs__ = torch::_compute_linear_combination_out(*out, *input, *coefficients);
+    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_all(tensor *out__, tensor self) {
+int atg__conj(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::all(*self);
+    auto outputs__ = torch::_conj(*self);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_all1(tensor *out__, tensor self, int64_t dim, int keepdim) {
+int atg__conj_physical(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::all(*self, dim, (bool)keepdim);
+    auto outputs__ = torch::_conj_physical(*self);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) {
+int atg__conv_depthwise2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
   PROTECT(
-    auto outputs__ = torch::all_out(*out, *self, dim, (bool)keepdim);
+    auto outputs__ = torch::_conv_depthwise2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) {
+int atg__conv_depthwise2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
   PROTECT(
-    auto outputs__ = torch::alpha_dropout(*input, p, (bool)train);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_conv_depthwise2d_backward_out(*grad_input, *grad_weight, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) {
+int atg__conv_depthwise2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
   PROTECT(
-    auto outputs__ = torch::alpha_dropout_(*self, p, (bool)train);
+    auto outputs__ = torch::_conv_depthwise2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_angle(tensor *out__, tensor self) {
+int atg__convert_indices_from_coo_to_csr(tensor *out__, tensor self, int64_t size, int out_int32) {
   PROTECT(
-    auto outputs__ = torch::angle(*self);
+    auto outputs__ = torch::_convert_indices_from_coo_to_csr(*self, size, (bool)out_int32);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_angle_out(tensor *out__, tensor out, tensor self) {
+int atg__convert_indices_from_coo_to_csr_out(tensor *out__, tensor out, tensor self, int64_t size, int out_int32) {
   PROTECT(
-    auto outputs__ = torch::angle_out(*out, *self);
+    auto outputs__ = torch::_convert_indices_from_coo_to_csr_out(*out, *self, size, (bool)out_int32);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_any(tensor *out__, tensor self) {
+int atg__convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32) {
   PROTECT(
-    auto outputs__ = torch::any(*self);
+    auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled, (bool)allow_tf32);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_any1(tensor *out__, tensor self, int64_t dim, int keepdim) {
+int atg__convolution_deprecated(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled) {
   PROTECT(
-    auto outputs__ = torch::any(*self, dim, (bool)keepdim);
+    auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) {
+int atg__convolution_mode(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) {
   PROTECT(
-    auto outputs__ = torch::any_out(*out, *self, dim, (bool)keepdim);
+    auto outputs__ = torch::_convolution_mode(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) {
+int atg__convolution_nogroup(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len) {
   PROTECT(
-    auto outputs__ = torch::arange(*end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::_convolution_nogroup(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_arange1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
+int atg__copy_from(tensor *out__, tensor self, tensor dst, int non_blocking) {
   PROTECT(
-    auto outputs__ = torch::arange(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::_copy_from(*self, *dst, (bool)non_blocking);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_arange2(tensor *out__, scalar start, scalar end, scalar step, int options_kind, int options_device) {
+int atg__copy_from_and_resize(tensor *out__, tensor self, tensor dst) {
   PROTECT(
-    auto outputs__ = torch::arange(*start, *end, *step, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::_copy_from_and_resize(*self, *dst);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_arange_out(tensor *out__, tensor out, scalar end) {
+int atg__ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int zero_infinity) {
   PROTECT(
-    auto outputs__ = torch::arange_out(*out, *end);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, (bool)zero_infinity);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_arange_out1(tensor *out__, tensor out, scalar start, scalar end) {
+int atg__ctc_loss_backward(tensor *out__, tensor grad, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, tensor neg_log_likelihood, tensor log_alpha, int64_t blank, int zero_infinity) {
   PROTECT(
-    auto outputs__ = torch::arange_out(*out, *start, *end);
+    auto outputs__ = torch::_ctc_loss_backward(*grad, *log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), *neg_log_likelihood, *log_alpha, blank, (bool)zero_infinity);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) {
+int atg__cudnn_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int deterministic, int zero_infinity) {
   PROTECT(
-    auto outputs__ = torch::argmax(*self, dim, (bool)keepdim);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_cudnn_ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, (bool)deterministic, (bool)zero_infinity);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) {
+int atg__cudnn_init_dropout_state(tensor *out__, double dropout, int train, int64_t dropout_seed, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::argmin(*self, dim, (bool)keepdim);
+    auto outputs__ = torch::_cudnn_init_dropout_state(dropout, (bool)train, dropout_seed, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) {
+int atg__cudnn_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor weight_buf, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) {
   PROTECT(
-    auto outputs__ = torch::argsort(*self, dim, (bool)descending);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_cudnn_rnn(*input, of_carray_tensor(weight_data, weight_len), weight_stride0, (weight_buf ? *weight_buf : torch::Tensor()), *hx, (cx ? *cx : torch::Tensor()), mode, hidden_size, proj_size, num_layers, (bool)batch_first, dropout, (bool)train, (bool)bidirectional, torch::IntArrayRef(batch_sizes_data, batch_sizes_len), (dropout_state ? *dropout_state : torch::Tensor()));
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    out__[3] = new torch::Tensor(std::get<3>(outputs__));
+    out__[4] = new torch::Tensor(std::get<4>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) {
+int atg__cudnn_rnn_flatten_weight(tensor *out__, tensor *weight_arr_data, int weight_arr_len, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, int bidirectional) {
   PROTECT(
-    auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset);
+    auto outputs__ = torch::_cudnn_rnn_flatten_weight(of_carray_tensor(weight_arr_data, weight_arr_len), weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, (bool)batch_first, (bool)bidirectional);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) {
+int atg__det_lu_based_helper(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_det_lu_based_helper(*self);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_asin(tensor *out__, tensor self) {
+int atg__det_lu_based_helper_backward_helper(tensor *out__, tensor det_grad, tensor det, tensor self, tensor lu, tensor pivs) {
   PROTECT(
-    auto outputs__ = torch::asin(*self);
+    auto outputs__ = torch::_det_lu_based_helper_backward_helper(*det_grad, *det, *self, *lu, *pivs);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_asin_(tensor *out__, tensor self) {
+int atg__dim_arange(tensor *out__, tensor like, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::asin_(*self);
+    auto outputs__ = torch::_dim_arange(*like, dim);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_asin_out(tensor *out__, tensor out, tensor self) {
+int atg__dirichlet_grad(tensor *out__, tensor x, tensor alpha, tensor total) {
   PROTECT(
-    auto outputs__ = torch::asin_out(*out, *self);
+    auto outputs__ = torch::_dirichlet_grad(*x, *alpha, *total);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_atan(tensor *out__, tensor self) {
+int atg__embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = torch::atan(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset, padding_idx);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    out__[3] = new torch::Tensor(std::get<3>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_atan2(tensor *out__, tensor self, tensor other) {
+int atg__embedding_bag_backward(tensor *out__, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = torch::atan2(*self, *other);
+    auto outputs__ = torch::_embedding_bag_backward(*grad, *indices, *offsets, *offset2bag, *bag_size, *maximum_indices, num_weights, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_atan2_(tensor *out__, tensor self, tensor other) {
+int atg__embedding_bag_dense_backward(tensor *out__, tensor grad, tensor indices, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = self->atan2_(*other);
+    auto outputs__ = torch::_embedding_bag_dense_backward(*grad, *indices, *offset2bag, *bag_size, *maximum_indices, num_weights, (bool)scale_grad_by_freq, mode, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_atan2_out(tensor *out__, tensor out, tensor self, tensor other) {
+int atg__embedding_bag_forward_only(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = torch::atan2_out(*out, *self, *other);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::_embedding_bag_forward_only(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset, padding_idx);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    out__[3] = new torch::Tensor(std::get<3>(outputs__));
    return 0;
  )
  return 1;
 }
 
-int atg_atan_(tensor *out__, tensor self) {
+int atg__embedding_bag_per_sample_weights_backward(tensor *out__, tensor grad, tensor weight, tensor indices, tensor offsets, tensor offset2bag, int64_t mode, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = torch::atan_(*self);
+    auto outputs__ = torch::_embedding_bag_per_sample_weights_backward(*grad, *weight, *indices, *offsets, *offset2bag, mode, padding_idx);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_atan_out(tensor *out__, tensor out, tensor self) {
+int atg__embedding_bag_sparse_backward(tensor *out__, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
-    auto outputs__ = torch::atan_out(*out, *self);
+    auto outputs__ = torch::_embedding_bag_sparse_backward(*grad, *indices, *offsets, *offset2bag, *bag_size, num_weights, (bool)scale_grad_by_freq, mode, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad) {
+int atg__empty_affine_quantized(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device, double scale, int64_t zero_point) {
   PROTECT(
-    auto outputs__ = torch::avg_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad);
+    auto outputs__ = torch::_empty_affine_quantized(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), scale, zero_point);
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
+int atg__empty_per_channel_affine_quantized(tensor *out__, int64_t *size_data, int size_len, tensor scales, tensor zero_points, int64_t axis, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
+    auto outputs__ = torch::_empty_per_channel_affine_quantized(torch::IntArrayRef(size_data, size_len), *scales, *zero_points, axis, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
    out__[0] = new torch::Tensor(outputs__);
    return 0;
  )
  return 1;
 }
 
-int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t
divisor_override) { +int atg__euclidean_dist(tensor *out__, tensor x1, tensor x2) { PROTECT( - auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::_euclidean_dist(*x1, *x2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fake_quantize_learnable_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) { PROTECT( - auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::_fake_quantize_learnable_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max, grad_factor); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fake_quantize_learnable_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) { PROTECT( - auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_fake_quantize_learnable_per_channel_affine_backward(*grad, *self, *scale, *zero_point, axis, quant_min, quant_max, grad_factor); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fake_quantize_learnable_per_tensor_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) { PROTECT( - auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::_fake_quantize_learnable_per_tensor_affine(*self, *scale, *zero_point, quant_min, quant_max, grad_factor); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, 
int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) { PROTECT( - auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_fake_quantize_learnable_per_tensor_affine_backward(*grad, *self, *scale, *zero_point, quant_min, quant_max, grad_factor); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, tensor fake_quant_enabled, int64_t quant_min, int64_t quant_max) { PROTECT( - auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(*self, *scale, *zero_point, *fake_quant_enabled, quant_min, quant_max); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +int atg__fft_c2c(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward) { PROTECT( - auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::_fft_c2c(*self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)forward); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { +int atg__fft_c2c_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward) { PROTECT( - auto outputs__ = torch::baddbmm(*self, *batch1, *batch2); + auto outputs__ = torch::_fft_c2c_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)forward); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { +int 
atg__fft_c2r(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size) { PROTECT( - auto outputs__ = self->baddbmm_(*batch1, *batch2); + auto outputs__ = torch::_fft_c2r(*self, torch::IntArrayRef(dim_data, dim_len), normalization, last_dim_size); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { +int atg__fft_c2r_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size) { PROTECT( - auto outputs__ = torch::baddbmm_out(*out, *self, *batch1, *batch2); + auto outputs__ = torch::_fft_c2r_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, last_dim_size); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { +int atg__fft_r2c(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided) { PROTECT( - auto outputs__ = torch::bartlett_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::_fft_r2c(*self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)onesided); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bartlett_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { +int atg__fft_r2c_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided) { PROTECT( - auto outputs__ = torch::bartlett_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::_fft_r2c_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)onesided); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled) { +int atg__fused_dropout(tensor *out__, tensor self, double p) { PROTECT( - auto outputs__ = torch::batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps, (bool)cudnn_enabled); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_fused_dropout(*self, p); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu) { +int atg__fused_moving_avg_obs_fq_helper(tensor *out__, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant) { PROTECT( - auto outputs__ = torch::batch_norm_backward_elemt(*grad_out, *input, *mean, *invstd, (weight ? 
*weight : torch::Tensor()), *mean_dy, *mean_dy_xmu); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_fused_moving_avg_obs_fq_helper(*self, *observer_on, *fake_quant_on, *running_min, *running_max, *scale, *zero_point, averaging_const, quant_min, quant_max, ch_axis, (bool)per_row_fake_quant, (bool)symmetric_quant); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g) { +int atg__fw_primal(tensor *out__, tensor self, int64_t level) { PROTECT( - auto outputs__ = torch::batch_norm_backward_reduce(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), (bool)input_g, (bool)weight_g, (bool)bias_g); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - out__[3] = new torch::Tensor(std::get<3>(outputs__)); + auto outputs__ = self->_fw_primal(level); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { +int atg__gather_sparse_backward(tensor *out__, tensor self, int64_t dim, tensor index, tensor grad) { PROTECT( - auto outputs__ = torch::batch_norm_elemt(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); + auto outputs__ = torch::_gather_sparse_backward(*self, dim, *index, *grad); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { +int atg__grid_sampler_2d_cpu_fallback(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( - auto outputs__ = torch::batch_norm_elemt_out(*out, *input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); + auto outputs__ = torch::_grid_sampler_2d_cpu_fallback(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count) { +int atg__grid_sampler_2d_cpu_fallback_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( - auto outputs__ = torch::batch_norm_gather_stats(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum, eps, count); + auto outputs__ = torch::_grid_sampler_2d_cpu_fallback_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; @@ -905,3601 +916,9059 @@ int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor return 1; } -int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len) { +int atg__index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( - auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, torch::IntArrayRef(counts_data, counts_len)); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::_index_copy_(*self, dim, *index, *source); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { +int atg__index_put_impl_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate, int unsafe) { PROTECT( - auto outputs__ = torch::batch_norm_stats(*input, eps); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::_index_put_impl_(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate, (bool)unsafe); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean, tensor running_var, double momentum) { +int atg__indices(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::batch_norm_update_stats(*input, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = self->_indices(); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bernoulli(tensor *out__, tensor self) { +int atg__inverse_helper(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::bernoulli(*self); + auto outputs__ = torch::_inverse_helper(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bernoulli1(tensor *out__, tensor self, double p) { +int atg__linalg_inv_out_helper_(tensor *out__, tensor self, tensor infos_lu, tensor infos_getri) { PROTECT( - auto outputs__ = torch::bernoulli(*self, p); + auto outputs__ = torch::_linalg_inv_out_helper_(*self, *infos_lu, *infos_getri); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bernoulli_(tensor *out__, tensor self, tensor p) { +int atg__linalg_qr_helper(tensor *out__, tensor self, char * mode) { PROTECT( - auto outputs__ = self->bernoulli_(*p); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_linalg_qr_helper(*self, std::string(mode)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_bernoulli_1(tensor *out__, tensor self, double p) { +int atg__log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = self->bernoulli_(p); + auto outputs__ = torch::_log_softmax(*self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bernoulli_out(tensor *out__, tensor out, tensor self) { +int atg__log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::bernoulli_out(*out, *self); + auto outputs__ = torch::_log_softmax_backward_data(*grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, tensor bias) { +int atg__log_softmax_backward_data_out(tensor *out__, tensor out, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::bilinear(*input1, *input2, *weight, (bias ? *bias : torch::Tensor())); + auto outputs__ = torch::_log_softmax_backward_data_out(*out, *grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction) { +int atg__log_softmax_out(tensor *out__, tensor out, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = torch::binary_cross_entropy(*self, *target, (weight ? *weight : torch::Tensor()), reduction); + auto outputs__ = torch::_log_softmax_out(*out, *self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { +int atg__logcumsumexp(tensor *out__, tensor self, int64_t dim) { PROTECT( - auto outputs__ = torch::binary_cross_entropy_backward(*grad_output, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction); + auto outputs__ = torch::_logcumsumexp(*self, dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_binary_cross_entropy_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { +int atg__logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) { PROTECT( - auto outputs__ = torch::binary_cross_entropy_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction); + auto outputs__ = torch::_logcumsumexp_out(*out, *self, dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction) { +int atg__lu_with_info(tensor *out__, tensor self, int pivot, int check_errors) { PROTECT( - auto outputs__ = torch::binary_cross_entropy_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_lu_with_info(*self, (bool)pivot, (bool)check_errors); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { +int atg__make_dual(tensor *out__, tensor primal, tensor tangent, int64_t level) { PROTECT( - auto outputs__ = torch::binary_cross_entropy_with_logits(*self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); + auto outputs__ = torch::_make_dual(*primal, *tangent, level); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { +int atg__make_per_channel_quantized_tensor(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis) { PROTECT( - auto outputs__ = torch::binary_cross_entropy_with_logits_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); + auto outputs__ = torch::_make_per_channel_quantized_tensor(*self, *scale, *zero_point, axis); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) { +int atg__make_per_tensor_quantized_tensor(tensor *out__, tensor self, double scale, int64_t zero_point) { PROTECT( - auto outputs__ = torch::bincount(*self, (weights ? 
*weights : torch::Tensor()), minlength); + auto outputs__ = torch::_make_per_tensor_quantized_tensor(*self, scale, zero_point); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_not(tensor *out__, tensor self) { +int atg__masked_scale(tensor *out__, tensor self, tensor mask, double scale) { PROTECT( - auto outputs__ = torch::bitwise_not(*self); + auto outputs__ = torch::_masked_scale(*self, *mask, scale); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_not_(tensor *out__, tensor self) { +int atg__mkldnn_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) { PROTECT( - auto outputs__ = self->bitwise_not_(); + auto outputs__ = torch::_mkldnn_reshape(*self, torch::IntArrayRef(shape_data, shape_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) { +int atg__mkldnn_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( - auto outputs__ = torch::bitwise_not_out(*out, *self); + auto outputs__ = torch::_mkldnn_transpose(*self, dim0, dim1); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor(tensor *out__, tensor self, scalar other) { +int atg__mkldnn_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( - auto outputs__ = torch::bitwise_xor(*self, *other); + auto outputs__ = torch::_mkldnn_transpose_(*self, dim0, dim1); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor1(tensor *out__, tensor self, tensor other) { +int atg__neg_view(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::bitwise_xor(*self, *other); + auto outputs__ = torch::_neg_view(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) { +int atg__nnpack_spatial_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( - auto outputs__ = self->bitwise_xor_(*other); + auto outputs__ = torch::_nnpack_spatial_convolution(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor_1(tensor *out__, tensor self, tensor other) { +int atg__nnpack_spatial_convolution_backward_input(tensor *out__, tensor input, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = self->bitwise_xor_(*other); + auto outputs__ = torch::_nnpack_spatial_convolution_backward_input(*input, *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg__nnpack_spatial_convolution_backward_weight(tensor *out__, tensor input, int64_t *weightsize_data, int weightsize_len, tensor grad_output, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); + auto outputs__ = torch::_nnpack_spatial_convolution_backward_weight(*input, torch::IntArrayRef(weightsize_data, weightsize_len), *grad_output, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bitwise_xor_out1(tensor *out__, tensor out, tensor self, scalar other) { +int atg__pack_padded_sequence(tensor *out__, tensor input, tensor lengths, int batch_first) { PROTECT( - auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_pack_padded_sequence(*input, *lengths, (bool)batch_first); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { +int atg__pack_padded_sequence_backward(tensor *out__, tensor grad, int64_t *input_size_data, int input_size_len, tensor batch_sizes, int batch_first) { PROTECT( - auto outputs__ = torch::blackman_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::_pack_padded_sequence_backward(*grad, torch::IntArrayRef(input_size_data, input_size_len), *batch_sizes, (bool)batch_first); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_blackman_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { +int atg__pad_packed_sequence(tensor *out__, tensor data, tensor batch_sizes, int batch_first, scalar padding_value, int64_t total_length) { PROTECT( - auto outputs__ = torch::blackman_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_pad_packed_sequence(*data, *batch_sizes, (bool)batch_first, *padding_value, total_length); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_bmm(tensor *out__, tensor self, tensor mat2) { +int atg__pdist_backward(tensor *out__, tensor grad, tensor self, double p, tensor pdist) { PROTECT( - auto outputs__ = torch::bmm(*self, *mat2); + auto outputs__ = torch::_pdist_backward(*grad, *self, p, *pdist); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) { +int 
atg__pin_memory(tensor *out__, tensor self, int device) { PROTECT( - auto outputs__ = torch::bmm_out(*out, *self, *mat2); + auto outputs__ = torch::_pin_memory(*self, device_of_int(device)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_broadcast_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { +int atg__remove_batch_dim(tensor *out__, tensor self, int64_t level, int64_t batch_size, int64_t out_dim) { PROTECT( - auto outputs__ = torch::broadcast_tensors(of_carray_tensor(tensors_data, tensors_len)); - int sz = outputs__.size(); - for (int i = 0; i < sz; ++i) - out__[i] = new torch::Tensor(outputs__[i]); - out__[sz] = nullptr; + auto outputs__ = torch::_remove_batch_dim(*self, level, batch_size, out_dim); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -// tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len) { -// PROTECT( -// auto outputs__ = torch::broadcast_tensors(of_carray_tensor(tensors_data, tensors_len)); -// int sz = outputs__.size(); -// torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); -// for (int i = 0; i < sz; ++i) -// out__[i] = new torch::Tensor(outputs__[i]); -// out__[sz] = nullptr; -// return out__; -// ) -// } - -int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { +int atg__reshape_alias(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len) { PROTECT( - auto outputs__ = torch::cartesian_prod(of_carray_tensor(tensors_data, tensors_len)); + auto outputs__ = torch::_reshape_alias(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { +int atg__reshape_from_tensor(tensor *out__, tensor self, tensor shape) { PROTECT( - auto outputs__ = torch::cat(of_carray_tensor(tensors_data, tensors_len), dim); + auto outputs__ = torch::_reshape_from_tensor(*self, *shape); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { +int atg__rowwise_prune(tensor *out__, tensor weight, tensor mask, int compressed_indices_dtype) { PROTECT( - auto outputs__ = torch::cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_rowwise_prune(*weight, *mask, torch::ScalarType(compressed_indices_dtype)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { +int atg__s_where(tensor *out__, tensor condition, tensor self, tensor other) { PROTECT( - auto outputs__ = self->cauchy_(median, sigma); + auto outputs__ = torch::_s_where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode) { +int atg__sample_dirichlet(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode); + auto outputs__ = torch::_sample_dirichlet(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_ceil(tensor *out__, tensor self) { +int atg__saturate_weight_to_fp16(tensor *out__, tensor weight) { PROTECT( - auto outputs__ = torch::ceil(*self); + auto 
outputs__ = torch::_saturate_weight_to_fp16(*weight); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_ceil_(tensor *out__, tensor self) { +int atg__segment_reduce_backward(tensor *out__, tensor grad, tensor output, tensor data, char * reduce, tensor lengths, int64_t axis) { PROTECT( - auto outputs__ = torch::ceil_(*self); + auto outputs__ = torch::_segment_reduce_backward(*grad, *output, *data, std::string(reduce), (lengths ? *lengths : torch::Tensor()), axis); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_ceil_out(tensor *out__, tensor out, tensor self) { +int atg__shape_as_tensor(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::ceil_out(*out, *self); + auto outputs__ = torch::_shape_as_tensor(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_celu(tensor *out__, tensor self) { +int atg__slow_conv2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, tensor finput) { PROTECT( - auto outputs__ = torch::celu(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_slow_conv2d_backward_out(*grad_input, *grad_weight, *grad_bias, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), *finput); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_celu_(tensor *out__, tensor self) { +int atg__sobol_engine_draw(tensor *out__, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype) { PROTECT( - auto outputs__ = torch::celu_(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_sobol_engine_draw(*quasi, n, *sobolstate, dimension, num_generated, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { +int atg__sobol_engine_ff_(tensor *out__, tensor self, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated) { PROTECT( - auto outputs__ = torch::chain_matmul(of_carray_tensor(matrices_data, matrices_len)); + auto outputs__ = torch::_sobol_engine_ff_(*self, n, *sobolstate, dimension, num_generated); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky(tensor *out__, tensor self, int upper) { +int atg__sobol_engine_initialize_state_(tensor *out__, tensor self, int64_t dimension) { PROTECT( - auto outputs__ = torch::cholesky(*self, (bool)upper); + auto outputs__ = torch::_sobol_engine_initialize_state_(*self, dimension); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky_inverse(tensor *out__, tensor self, int upper) { +int atg__sobol_engine_scramble_(tensor *out__, tensor self, tensor ltm, int64_t dimension) { PROTECT( - auto outputs__ = torch::cholesky_inverse(*self, (bool)upper); + auto outputs__ = torch::_sobol_engine_scramble_(*self, *ltm, dimension); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int 
upper) { +int atg__softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = torch::cholesky_inverse_out(*out, *self, (bool)upper); + auto outputs__ = torch::_softmax(*self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { +int atg__softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::cholesky_out(*out, *self, (bool)upper); + auto outputs__ = torch::_softmax_backward_data(*grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky_solve(tensor *out__, tensor self, tensor input2, int upper) { +int atg__softmax_backward_data_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::cholesky_solve(*self, *input2, (bool)upper); + auto outputs__ = torch::_softmax_backward_data_out(*grad_input, *grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2, int upper) { +int atg__softmax_out(tensor *out__, tensor out, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = torch::cholesky_solve_out(*out, *self, *input2, (bool)upper); + auto outputs__ = torch::_softmax_out(*out, *self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { +int atg__solve_helper(tensor *out__, tensor self, tensor A) { PROTECT( - auto outputs__ = torch::chunk(*self, chunks, dim); - int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); - for (int i = 0; i < sz; ++i) - out__[i] = new torch::Tensor(outputs__[i]); - out__[sz] = nullptr; - // return out__; + auto outputs__ = torch::_solve_helper(*self, *A); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) { +int atg__sparse_addmm(tensor *out__, tensor self, tensor sparse, tensor dense) { PROTECT( - auto outputs__ = torch::clamp(*self, *min, *max); + auto outputs__ = torch::_sparse_addmm(*self, *sparse, *dense); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) { +int atg__sparse_coo_tensor_unsafe(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::clamp_(*self, *min, *max); + auto outputs__ = torch::_sparse_coo_tensor_unsafe(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_max(tensor *out__, tensor self, scalar max) { +int atg__sparse_coo_tensor_with_dims(tensor *out__, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::clamp_max(*self, *max); + auto outputs__ = torch::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, torch::IntArrayRef(size_data, size_len), 
at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_max_(tensor *out__, tensor self, scalar max) { +int atg__sparse_coo_tensor_with_dims_and_tensors(tensor *out__, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, tensor indices, tensor values, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::clamp_max_(*self, *max); + auto outputs__ = torch::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, torch::IntArrayRef(size_data, size_len), *indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) { +int atg__sparse_csr_tensor_unsafe(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::clamp_max_out(*out, *self, *max); + auto outputs__ = torch::_sparse_csr_tensor_unsafe(*crow_indices, *col_indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_min(tensor *out__, tensor self, scalar min) { +int atg__sparse_log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = torch::clamp_min(*self, *min); + auto outputs__ = torch::_sparse_log_softmax(*self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_min_(tensor *out__, tensor self, scalar min) { +int atg__sparse_log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::clamp_min_(*self, *min); + auto outputs__ = torch::_sparse_log_softmax_backward_data(*grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_min_out(tensor *out__, tensor out, tensor self, scalar min) { +int atg__sparse_log_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::clamp_min_out(*out, *self, *min); + auto outputs__ = torch::_sparse_log_softmax(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { +int atg__sparse_mask_helper(tensor *out__, tensor t, tensor mask_indices) { PROTECT( - auto outputs__ = torch::clamp_out(*out, *self, *min, *max); + auto outputs__ = torch::_sparse_mask_helper(*t, *mask_indices); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_clone(tensor *out__, tensor self) { +int atg__sparse_mm(tensor *out__, tensor sparse, tensor dense) { PROTECT( - auto outputs__ = torch::clone(*self); + auto outputs__ = torch::_sparse_mm(*sparse, *dense); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_coalesce(tensor *out__, tensor self) { +int atg__sparse_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) { PROTECT( - auto outputs__ = self->coalesce(); + auto outputs__ = torch::_sparse_softmax(*self, dim, (bool)half_to_float); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, 
int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +int atg__sparse_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { PROTECT( - auto outputs__ = torch::col2im(*self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); + auto outputs__ = torch::_sparse_softmax_backward_data(*grad_output, *output, dim, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +int atg__sparse_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( - auto outputs__ = torch::col2im_backward(*grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); + auto outputs__ = torch::_sparse_softmax(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_col2im_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +int atg__sparse_sparse_matmul(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::col2im_backward_out(*grad_input, *grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); + auto outputs__ = torch::_sparse_sparse_matmul(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +int atg__sparse_sum(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::col2im_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); + auto outputs__ = torch::_sparse_sum(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement) { +int atg__sparse_sum_backward(tensor *out__, tensor grad, tensor self, int64_t *dim_data, int dim_len) { PROTECT( - auto outputs__ = torch::combinations(*self, r, (bool)with_replacement); + auto outputs__ = torch::_sparse_sum_backward(*grad, *self, torch::IntArrayRef(dim_data, dim_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conj(tensor *out__, tensor self) { +int atg__sparse_sum_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len) { PROTECT( - 
auto outputs__ = torch::conj(*self); + auto outputs__ = torch::_sparse_sum(*self, torch::IntArrayRef(dim_data, dim_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conj_out(tensor *out__, tensor out, tensor self) { +int atg__sparse_sum_dim_dtype(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int dtype) { PROTECT( - auto outputs__ = torch::conj_out(*out, *self); + auto outputs__ = torch::_sparse_sum(*self, torch::IntArrayRef(dim_data, dim_len), torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_len) { +int atg__sparse_sum_dtype(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::constant_pad_nd(*self, torch::IntArrayRef(pad_data, pad_len)); + auto outputs__ = torch::_sparse_sum(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_contiguous(tensor *out__, tensor self) { +int atg__stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( - auto outputs__ = self->contiguous(); + auto outputs__ = torch::_stack(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +int atg__stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( - auto outputs__ = torch::conv1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + auto outputs__ = torch::_stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +int atg__standard_gamma(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::conv2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + auto outputs__ = torch::_standard_gamma(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +int atg__standard_gamma_grad(tensor *out__, tensor self, tensor output) { PROTECT( - auto outputs__ = torch::conv3d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + auto outputs__ = torch::_standard_gamma_grad(*self, *output); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) { +int atg__svd_helper(tensor *out__, tensor self, int some, int compute_uv) { PROTECT( - auto outputs__ = torch::conv_tbc(*self, *weight, *bias, pad); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_svd_helper(*self, (bool)some, (bool)compute_uv); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weight, tensor bias, int64_t pad) { +int atg__symeig_helper(tensor *out__, tensor self, int eigenvectors, int upper) { PROTECT( - auto outputs__ = torch::conv_tbc_backward(*self, *input, *weight, *bias, pad); + auto outputs__ = torch::_symeig_helper(*self, (bool)eigenvectors, (bool)upper); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +int atg__test_ambiguous_defaults(tensor *out__, tensor dummy, int64_t a, int64_t b) { PROTECT( - auto outputs__ = torch::conv_transpose1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); + auto outputs__ = torch::_test_ambiguous_defaults(*dummy, a, b); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +int atg__test_ambiguous_defaults_b(tensor *out__, tensor dummy, int64_t a, char * b) { PROTECT( - auto outputs__ = torch::conv_transpose2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); + auto outputs__ = torch::_test_ambiguous_defaults(*dummy, a, std::string(b)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +int atg__test_optional_filled_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) { PROTECT( - auto outputs__ = torch::conv_transpose3d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); + auto outputs__ = torch::_test_optional_filled_intlist(*values, torch::IntArrayRef(addends_data, addends_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { +int atg__test_optional_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) { PROTECT( - auto outputs__ = torch::convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); + auto outputs__ = torch::_test_optional_intlist(*values, torch::IntArrayRef(addends_data, addends_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { +int atg__test_serialization_subcmul(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::convolution_overrideable(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); + auto outputs__ = torch::_test_serialization_subcmul(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_blocking) { +int atg__test_string_default(tensor *out__, tensor dummy, char * a, char * b) { PROTECT( - auto outputs__ = torch::copy_sparse_to_sparse_(*self, *src, (bool)non_blocking); + auto outputs__ = torch::_test_string_default(*dummy, std::string(a), std::string(b)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cos(tensor *out__, tensor self) { +int atg__thnn_differentiable_gru_cell_backward(tensor *out__, tensor grad_hy, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias) { PROTECT( - auto outputs__ = torch::cos(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_differentiable_gru_cell_backward(*grad_hy, *input_gates, *hidden_gates, *hx, (input_bias ? *input_bias : torch::Tensor()), (hidden_bias ? 
*hidden_bias : torch::Tensor())); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + out__[4] = new torch::Tensor(std::get<4>(outputs__)); return 0; ) return 1; } -int atg_cos_(tensor *out__, tensor self) { +int atg__thnn_differentiable_lstm_cell_backward(tensor *out__, tensor grad_hy, tensor grad_cy, tensor input_gates, tensor hidden_gates, tensor input_bias, tensor hidden_bias, tensor cx, tensor cy) { PROTECT( - auto outputs__ = torch::cos_(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_differentiable_lstm_cell_backward((grad_hy ? *grad_hy : torch::Tensor()), (grad_cy ? *grad_cy : torch::Tensor()), *input_gates, *hidden_gates, (input_bias ? *input_bias : torch::Tensor()), (hidden_bias ? *hidden_bias : torch::Tensor()), *cx, *cy); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + out__[4] = new torch::Tensor(std::get<4>(outputs__)); return 0; ) return 1; } -int atg_cos_out(tensor *out__, tensor out, tensor self) { +int atg__thnn_fused_gru_cell(tensor *out__, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias) { PROTECT( - auto outputs__ = torch::cos_out(*out, *self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_fused_gru_cell(*input_gates, *hidden_gates, *hx, (input_bias ? *input_bias : torch::Tensor()), (hidden_bias ? *hidden_bias : torch::Tensor())); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_cosh(tensor *out__, tensor self) { +int atg__thnn_fused_gru_cell_backward(tensor *out__, tensor grad_hy, tensor workspace, int has_bias) { PROTECT( - auto outputs__ = torch::cosh(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_fused_gru_cell_backward(*grad_hy, *workspace, (bool)has_bias); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + out__[4] = new torch::Tensor(std::get<4>(outputs__)); return 0; ) return 1; } -int atg_cosh_(tensor *out__, tensor self) { +int atg__thnn_fused_lstm_cell(tensor *out__, tensor input_gates, tensor hidden_gates, tensor cx, tensor input_bias, tensor hidden_bias) { PROTECT( - auto outputs__ = torch::cosh_(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_fused_lstm_cell(*input_gates, *hidden_gates, *cx, (input_bias ? *input_bias : torch::Tensor()), (hidden_bias ? *hidden_bias : torch::Tensor())); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_cosh_out(tensor *out__, tensor out, tensor self) { +int atg__thnn_fused_lstm_cell_backward(tensor *out__, tensor grad_hy, tensor grad_cy, tensor cx, tensor cy, tensor workspace, int has_bias) { PROTECT( - auto outputs__ = torch::cosh_out(*out, *self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_thnn_fused_lstm_cell_backward((grad_hy ? 
*grad_hy : torch::Tensor()), (grad_cy ? *grad_cy : torch::Tensor()), *cx, *cy, *workspace, (bool)has_bias); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + out__[4] = new torch::Tensor(std::get<4>(outputs__)); return 0; ) return 1; } -int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { +int atg__to_copy(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking) { PROTECT( - auto outputs__ = torch::cosine_embedding_loss(*input1, *input2, *target, margin, reduction); + auto outputs__ = torch::_to_copy(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, double eps) { +int atg__to_cpu(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::cosine_similarity(*x1, *x2, dim, eps); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_to_cpu(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; return 0; ) return 1; } -int atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { +int atg__trilinear(tensor *out__, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim) { PROTECT( - auto outputs__ = torch::cross(*self, *other, dim); + auto outputs__ = torch::_trilinear(*i1, *i2, *i3, torch::IntArrayRef(expand1_data, expand1_len), torch::IntArrayRef(expand2_data, expand2_len), torch::IntArrayRef(expand3_data, expand3_len), torch::IntArrayRef(sumdim_data, sumdim_len), unroll_dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { +int atg__unique(tensor *out__, tensor self, int sorted, int return_inverse) { PROTECT( - auto outputs__ = torch::cross_out(*out, *self, *other, dim); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_unique(*self, (bool)sorted, (bool)return_inverse); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity) { +int atg__unique2(tensor *out__, tensor self, int sorted, int return_inverse, int return_counts) { PROTECT( - auto outputs__ = torch::ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, reduction, (bool)zero_infinity); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_unique2(*self, (bool)sorted, (bool)return_inverse, (bool)return_counts); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new 
torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_ctc_loss1(tensor *out__, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity) { +int atg__unpack_dual(tensor *out__, tensor dual, int64_t level) { PROTECT( - auto outputs__ = torch::ctc_loss(*log_probs, *targets, *input_lengths, *target_lengths, blank, reduction, (bool)zero_infinity); + auto outputs__ = torch::_unpack_dual(*dual, level); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg__unsafe_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { + PROTECT( + auto outputs__ = torch::_unsafe_view(*self, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W) { +int atg__values(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_affine_grid_generator(*theta, n, C, H, W); + auto outputs__ = self->_values(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W) { +int atg__weight_norm(tensor *out__, tensor v, tensor g, int64_t dim) { PROTECT( - auto outputs__ = torch::cudnn_affine_grid_generator_backward(*grad, n, C, H, W); + auto outputs__ = torch::_weight_norm(*v, *g, dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { +int atg__weight_norm_cuda_interface(tensor *out__, tensor v, tensor g, int64_t dim) { PROTECT( - auto outputs__ = torch::cudnn_batch_norm(*input, *weight, (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); + auto outputs__ = torch::_weight_norm_cuda_interface(*v, *g, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - out__[3] = new torch::Tensor(std::get<3>(outputs__)); return 0; ) return 1; } -int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace) { +int atg__weight_norm_cuda_interface_backward(tensor *out__, tensor grad_w, tensor saved_v, tensor saved_g, tensor saved_norms, int64_t dim) { PROTECT( - auto outputs__ = torch::cudnn_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? 
*save_var : torch::Tensor()), epsilon, *reserveSpace); + auto outputs__ = torch::_weight_norm_cuda_interface_backward(*grad_w, *saved_v, *saved_g, *saved_norms, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg__weight_norm_differentiable_backward(tensor *out__, tensor grad_w, tensor saved_v, tensor saved_g, tensor saved_norms, int64_t dim) { PROTECT( - auto outputs__ = torch::cudnn_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::_weight_norm_differentiable_backward(*grad_w, *saved_v, *saved_g, *saved_norms, dim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_cudnn_convolution_backward_bias(tensor *out__, tensor grad_output) { +int atg_abs(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_backward_bias(*grad_output); + auto outputs__ = torch::abs(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg_abs_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::abs_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg_abs_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::abs_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int 
deterministic) { +int atg_absolute(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::absolute(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_transpose_backward_bias(tensor *out__, tensor grad_output) { +int atg_absolute_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_bias(*grad_output); + auto outputs__ = self->absolute_(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg_absolute_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::absolute_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg_acos(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::acos(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { +int atg_acos_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_grid_sampler(*self, *grid); + auto outputs__ = torch::acos_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, tensor grad_output) { +int atg_acos_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::cudnn_grid_sampler_backward(*self, *grid, *grad_output); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::acos_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { +int atg_acosh(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cumprod(*self, dim, torch::ScalarType(dtype)); + auto outputs__ = torch::acosh(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cumprod_out(tensor *out__, tensor out, tensor 
self, int64_t dim, int dtype) { +int atg_acosh_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::cumprod_out(*out, *self, dim, torch::ScalarType(dtype)); + auto outputs__ = torch::acosh_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { +int atg_acosh_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::cumsum(*self, dim, torch::ScalarType(dtype)); + auto outputs__ = torch::acosh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { +int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::cumsum_out(*out, *self, dim, torch::ScalarType(dtype)); + auto outputs__ = torch::adaptive_avg_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_data(tensor *out__, tensor self) { +int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = self->data(); + auto outputs__ = torch::adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dequantize(tensor *out__, tensor self) { +int atg_adaptive_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::dequantize(*self); + auto outputs__ = torch::adaptive_avg_pool2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_det(tensor *out__, tensor self) { +int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::det(*self); + auto outputs__ = torch::adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_detach(tensor *out__, tensor self) { +int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_input, tensor grad_output, tensor self) { PROTECT( - auto outputs__ = torch::detach(*self); + auto outputs__ = torch::adaptive_avg_pool3d_backward_out(*grad_input, *grad_output, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_detach_(tensor *out__, tensor self) { +int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::detach_(*self); + auto outputs__ = torch::adaptive_avg_pool3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_diag(tensor *out__, tensor self, int64_t diagonal) { +int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::diag(*self, diagonal); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::adaptive_max_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_diag_embed(tensor *out__, tensor self, int64_t 
offset, int64_t dim1, int64_t dim2) { +int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::diag_embed(*self, offset, dim1, dim2); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::adaptive_max_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { +int atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( - auto outputs__ = torch::diag_out(*out, *self, diagonal); + auto outputs__ = torch::adaptive_max_pool2d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_diagflat(tensor *out__, tensor self, int64_t offset) { +int atg_adaptive_max_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { PROTECT( - auto outputs__ = torch::diagflat(*self, offset); + auto outputs__ = torch::adaptive_max_pool2d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { +int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::diagonal(*self, offset, dim1, dim2); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::adaptive_max_pool2d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_digamma(tensor *out__, tensor self) { +int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::digamma(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::adaptive_max_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_digamma_(tensor *out__, tensor self) { +int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( - auto outputs__ = self->digamma_(); + auto outputs__ = torch::adaptive_max_pool3d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_digamma_out(tensor *out__, tensor out, tensor self) { +int atg_adaptive_max_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { PROTECT( - auto outputs__ = torch::digamma_out(*out, *self); + auto outputs__ = torch::adaptive_max_pool3d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dist(tensor *out__, tensor self, tensor other) { +int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( - auto outputs__ = torch::dist(*self, *other); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = 
torch::adaptive_max_pool3d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_div(tensor *out__, tensor self, tensor other) { +int atg_add(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::div(*self, *other); + auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_div1(tensor *out__, tensor self, scalar other) { +int atg_add_(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::div(*self, *other); + auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_div_(tensor *out__, tensor self, tensor other) { +int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( - auto outputs__ = self->div_(*other); + auto outputs__ = torch::add_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_div_1(tensor *out__, tensor self, scalar other) { +int atg_add_scalar(tensor *out__, tensor self, scalar other) { PROTECT( - auto outputs__ = self->div_(*other); + auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg_add_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( - auto outputs__ = torch::div_out(*out, *self, *other); + auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dot(tensor *out__, tensor self, tensor tensor) { +int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( - auto outputs__ = torch::dot(*self, *tensor); + auto outputs__ = torch::addbmm(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { +int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( - auto outputs__ = torch::dot_out(*out, *self, *tensor); + auto outputs__ = self->addbmm_(*batch1, *batch2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dropout(tensor *out__, tensor input, double p, int train) { +int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { PROTECT( - auto outputs__ = torch::dropout(*input, p, (bool)train); + auto outputs__ = torch::addbmm_out(*out, *self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_dropout_(tensor *out__, tensor self, double p, int train) { +int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::dropout_(*self, p, (bool)train); + auto outputs__ = torch::addcdiv(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eig(tensor *out__, tensor self, int eigenvectors) { +int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::eig(*self, (bool)eigenvectors); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = self->addcdiv_(*tensor1, *tensor2); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eig_out(tensor *out__, tensor e, tensor v, tensor self, int 
eigenvectors) { +int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::eig_out(*e, *v, *self, (bool)eigenvectors); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::addcdiv_out(*out, *self, *tensor1, *tensor2); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_elu(tensor *out__, tensor self) { +int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::elu(*self); + auto outputs__ = torch::addcmul(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_elu_(tensor *out__, tensor self) { +int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::elu_(*self); + auto outputs__ = self->addcmul_(*tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { +int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( - auto outputs__ = torch::elu_backward(*grad_output, *alpha, *scale, *input_scale, *output); + auto outputs__ = torch::addcmul_out(*out, *self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_elu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { +int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( - auto outputs__ = torch::elu_backward_out(*grad_input, *grad_output, *alpha, *scale, *input_scale, *output); + auto outputs__ = torch::addmm(*self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_elu_out(tensor *out__, tensor out, tensor self) { +int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( - auto outputs__ = torch::elu_out(*out, *self); + auto outputs__ = self->addmm_(*mat1, *mat2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse) { +int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) { PROTECT( - auto outputs__ = torch::embedding(*weight, *indices, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); + auto outputs__ = torch::addmm_out(*out, *self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse) { +int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( - auto outputs__ = torch::embedding_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); + auto outputs__ = torch::addmv(*self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights) { +int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( - auto outputs__ = torch::embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? 
*per_sample_weights : torch::Tensor())); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - out__[3] = new torch::Tensor(std::get<3>(outputs__)); + auto outputs__ = torch::addmv_(*self, *mat, *vec); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { +int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec) { PROTECT( - auto outputs__ = torch::embedding_dense_backward(*grad_output, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); + auto outputs__ = torch::addmv_out(*out, *self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max_norm, double norm_type) { +int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( - auto outputs__ = torch::embedding_renorm_(*self, *indices, max_norm, norm_type); + auto outputs__ = torch::addr(*self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { +int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( - auto outputs__ = torch::embedding_sparse_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); + auto outputs__ = self->addr_(*vec1, *vec2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { +int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) { PROTECT( - auto outputs__ = torch::empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::addr_out(*out, *self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_empty_like(tensor *out__, tensor self) { +int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) { PROTECT( - auto outputs__ = torch::empty_like(*self); + auto outputs__ = torch::affine_grid_generator(*theta, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_empty_like1(tensor *out__, tensor self, int options_kind, int options_device) { +int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size_data, int size_len, int align_corners) { PROTECT( - auto outputs__ = torch::empty_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::affine_grid_generator_backward(*grad, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { +int atg_alias(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::empty_out(*out, torch::IntArrayRef(size_data, size_len)); + auto outputs__ = torch::alias(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, 
int64_t *stride_data, int stride_len, int options_kind, int options_device) { +int atg_align_as(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = self->align_as(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eq(tensor *out__, tensor self, scalar other) { +int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::eq(*self, *other); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::align_tensors(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; return 0; ) return 1; } -int atg_eq1(tensor *out__, tensor self, tensor other) { +int atg_all(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::eq(*self, *other); + auto outputs__ = torch::all(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eq_(tensor *out__, tensor self, scalar other) { +int atg_all_all_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = self->eq_(*other); + auto outputs__ = torch::all_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eq_1(tensor *out__, tensor self, tensor other) { +int atg_all_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = self->eq_(*other); + auto outputs__ = torch::all(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eq_out(tensor *out__, tensor out, tensor self, scalar other) { +int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = torch::eq_out(*out, *self, *other); + auto outputs__ = torch::all_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eq_out1(tensor *out__, tensor out, tensor self, tensor other) { +int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( - auto outputs__ = torch::eq_out(*out, *self, *other); + auto outputs__ = torch::alpha_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erf(tensor *out__, tensor self) { +int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( - auto outputs__ = torch::erf(*self); + auto outputs__ = torch::alpha_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erf_(tensor *out__, tensor self) { +int atg_amax(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( - auto outputs__ = torch::erf_(*self); + auto outputs__ = torch::amax(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erf_out(tensor *out__, tensor out, tensor self) { +int atg_amax_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( - auto outputs__ = torch::erf_out(*out, *self); + auto outputs__ = torch::amax_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new 
torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erfc(tensor *out__, tensor self) { +int atg_amin(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( - auto outputs__ = torch::erfc(*self); + auto outputs__ = torch::amin(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erfc_(tensor *out__, tensor self) { +int atg_amin_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( - auto outputs__ = torch::erfc_(*self); + auto outputs__ = torch::amin_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erfc_out(tensor *out__, tensor out, tensor self) { +int atg_aminmax(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = torch::erfc_out(*out, *self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::aminmax(*self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_erfinv(tensor *out__, tensor self) { +int atg_aminmax_out(tensor *out__, tensor min, tensor max, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = torch::erfinv(*self); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::aminmax_out(*min, *max, *self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_erfinv_(tensor *out__, tensor self) { +int atg_angle(tensor *out__, tensor self) { PROTECT( - auto outputs__ = self->erfinv_(); + auto outputs__ = torch::angle(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_erfinv_out(tensor *out__, tensor out, tensor self) { +int atg_angle_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::erfinv_out(*out, *self); + auto outputs__ = torch::angle_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_exp(tensor *out__, tensor self) { +int atg_any(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::exp(*self); + auto outputs__ = torch::any(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_exp_(tensor *out__, tensor self) { +int atg_any_all_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::exp_(*self); + auto outputs__ = torch::any_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_exp_out(tensor *out__, tensor out, tensor self) { +int atg_any_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = torch::exp_out(*out, *self); + auto outputs__ = torch::any(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int implicit) { +int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( - auto outputs__ = self->expand(torch::IntArrayRef(size_data, size_len), (bool)implicit); + auto outputs__ = torch::any_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_expand_as(tensor *out__, tensor self, tensor other) { +int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) { 
PROTECT( - auto outputs__ = self->expand_as(*other); + auto outputs__ = torch::arange(*end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_expm1(tensor *out__, tensor self) { +int atg_arange_out(tensor *out__, tensor out, scalar end) { PROTECT( - auto outputs__ = torch::expm1(*self); + auto outputs__ = torch::arange_out(*out, *end); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_expm1_(tensor *out__, tensor self) { +int atg_arange_start(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::expm1_(*self); + auto outputs__ = torch::arange(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_expm1_out(tensor *out__, tensor out, tensor self) { +int atg_arange_start_out(tensor *out__, tensor out, scalar start, scalar end) { PROTECT( - auto outputs__ = torch::expm1_out(*out, *self); + auto outputs__ = torch::arange_out(*out, *start, *end); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_exponential_(tensor *out__, tensor self, double lambd) { +int atg_arange_start_step(tensor *out__, scalar start, scalar end, scalar step, int options_kind, int options_device) { PROTECT( - auto outputs__ = self->exponential_(lambd); + auto outputs__ = torch::arange(*start, *end, *step, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) { +int atg_arccos(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::eye(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::arccos(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eye1(tensor *out__, int64_t n, int64_t m, int options_kind, int options_device) { +int atg_arccos_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::eye(n, m, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::arccos_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eye_out(tensor *out__, tensor out, int64_t n) { +int atg_arccos_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::eye_out(*out, n); + auto outputs__ = torch::arccos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_eye_out1(tensor *out__, tensor out, int64_t n, int64_t m) { +int atg_arccosh(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::eye_out(*out, n, m); + auto outputs__ = torch::arccosh(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { +int atg_arccosh_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fake_quantize_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max); + auto outputs__ = torch::arccosh_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fake_quantize_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, 
int64_t quant_max) { +int atg_arccosh_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::fake_quantize_per_channel_affine_backward(*grad, *self, *scale, *zero_point, axis, quant_min, quant_max); + auto outputs__ = torch::arccosh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { +int atg_arcsin(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, scale, zero_point, quant_min, quant_max); + auto outputs__ = torch::arcsin(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fake_quantize_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { +int atg_arcsin_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fake_quantize_per_tensor_affine_backward(*grad, *self, scale, zero_point, quant_min, quant_max); + auto outputs__ = torch::arcsin_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, tensor packed_weight, tensor bias) { +int atg_arcsin_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_linear_fp16_weight(*input, *packed_weight, *bias); + auto outputs__ = torch::arcsin_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, tensor packed_weight, tensor bias) { +int atg_arcsinh(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_linear_fp16_weight_fp32_activation(*input, *packed_weight, *bias); + auto outputs__ = torch::arcsinh(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { +int atg_arcsinh_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_linear_int8_weight(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); + auto outputs__ = torch::arcsinh_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { +int atg_arcsinh_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_linear_int8_weight_fp32_activation(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); + auto outputs__ = torch::arcsinh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) { +int atg_arctan(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_pack_gemm_matrix_fp16(*input); + auto outputs__ = torch::arctan(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) { +int atg_arctan_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input); + auto outputs__ = torch::arctan_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } 
-int atg_fbgemm_pack_quantized_matrix1(tensor *out__, tensor input, int64_t K, int64_t n) {
+int atg_arctan_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input, K, n);
+    auto outputs__ = torch::arctan_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) {
+int atg_arctanh(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::feature_alpha_dropout(*input, p, (bool)train);
+    auto outputs__ = torch::arctanh(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) {
+int atg_arctanh_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::feature_alpha_dropout_(*self, p, (bool)train);
+    auto outputs__ = torch::arctanh_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_feature_dropout(tensor *out__, tensor input, double p, int train) {
+int atg_arctanh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::feature_dropout(*input, p, (bool)train);
+    auto outputs__ = torch::arctanh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) {
+int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::feature_dropout_(*self, p, (bool)train);
+    auto outputs__ = torch::argmax(*self, dim, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) {
+int atg_argmax_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::fft(*self, signal_ndim, (bool)normalized);
+    auto outputs__ = torch::argmax_out(*out, *self, dim, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fill_(tensor *out__, tensor self, scalar value) {
+int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::fill_(*self, *value);
+    auto outputs__ = torch::argmin(*self, dim, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fill_1(tensor *out__, tensor self, tensor value) {
+int atg_argmin_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::fill_(*self, *value);
+    auto outputs__ = torch::argmin_out(*out, *self, dim, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) {
+int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) {
   PROTECT(
-    auto outputs__ = self->fill_diagonal_(*fill_value, (bool)wrap);
+    auto outputs__ = torch::argsort(*self, dim, (bool)descending);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) {
+int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) {
   PROTECT(
-    auto outputs__ = torch::flatten(*self, start_dim, end_dim);
+    auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
+int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) {
   PROTECT(
-    auto outputs__ = torch::flip(*self, torch::IntArrayRef(dims_data, dims_len));
+    auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_floor(tensor *out__, tensor self) {
+int atg_asin(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::floor(*self);
+    auto outputs__ = torch::asin(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_floor_(tensor *out__, tensor self) {
+int atg_asin_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::floor_(*self);
+    auto outputs__ = torch::asin_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_floor_out(tensor *out__, tensor out, tensor self) {
+int atg_asin_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::floor_out(*out, *self);
+    auto outputs__ = torch::asin_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod(tensor *out__, tensor self, scalar other) {
+int atg_asinh(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fmod(*self, *other);
+    auto outputs__ = torch::asinh(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod1(tensor *out__, tensor self, tensor other) {
+int atg_asinh_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fmod(*self, *other);
+    auto outputs__ = torch::asinh_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod_(tensor *out__, tensor self, scalar other) {
+int atg_asinh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->fmod_(*other);
+    auto outputs__ = torch::asinh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod_1(tensor *out__, tensor self, tensor other) {
+int atg_atan(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->fmod_(*other);
+    auto outputs__ = torch::atan(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_atan2(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::fmod_out(*out, *self, *other);
+    auto outputs__ = torch::atan2(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fmod_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_atan2_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::fmod_out(*out, *self, *other);
+    auto outputs__ = self->atan2_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_frac(tensor *out__, tensor self) {
+int atg_atan2_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::frac(*self);
+    auto outputs__ = torch::atan2_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_frac_(tensor *out__, tensor self) {
+int atg_atan_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::frac_(*self);
+    auto outputs__ = torch::atan_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_frac_out(tensor *out__, tensor out, tensor self) {
+int atg_atan_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::frac_out(*out, *self);
+    auto outputs__ = torch::atan_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+int atg_atanh(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::atanh(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+int atg_atanh_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+    auto outputs__ = torch::atanh_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+int atg_atanh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+    auto outputs__ = torch::atanh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool2d_out(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+int atg_atleast_1d(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool2d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::atleast_1d(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+int atg_atleast_1d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::atleast_1d(of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+int atg_atleast_2d(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+    auto outputs__ = torch::atleast_2d(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+int atg_atleast_2d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::atleast_2d(of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_fractional_max_pool3d_out(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+int atg_atleast_3d(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::fractional_max_pool3d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::atleast_3d(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_frobenius_norm(tensor *out__, tensor self) {
+int atg_atleast_3d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::frobenius_norm(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::atleast_3d(of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }
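Note: the *_sequence hunks above (atg_atleast_1d_sequence, atg_atleast_2d_sequence, atg_atleast_3d_sequence) all use the generator's list-return convention: the wrapper fills the caller-provided out__ array with freshly heap-allocated tensors and nullptr-terminates it, while PROTECT(...) converts any C++ exception into a nonzero return code. The sketch below shows how a caller might consume that convention; it is illustrative only, and both the torch_api.h header name and the fixed 64-slot bound are assumptions, not something this patch defines.

// Hypothetical caller-side sketch of the nullptr-terminated list convention.
#include "torch_api.h"  // assumed header declaring `tensor` and the atg_* entry points

int collect_atleast_1d(tensor *inputs, int inputs_len) {
  tensor out[64];  // assumed large enough; the wrapper writes out[sz] = nullptr
  if (atg_atleast_1d_sequence(out, inputs, inputs_len) != 0)
    return 1;  // nonzero: PROTECT caught a C++ exception inside libtorch
  for (int i = 0; out[i] != nullptr; ++i) {
    // each out[i] is a heap-allocated torch::Tensor* now owned by the caller
  }
  return 0;
}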
-int atg_frobenius_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad) {
   PROTECT(
-    auto outputs__ = torch::frobenius_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = torch::avg_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::frobenius_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) {
+int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_full_like(tensor *out__, tensor self, scalar fill_value) {
+int atg_avg_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::full_like(*self, *fill_value);
+    auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_full_like1(tensor *out__, tensor self, scalar fill_value, int options_kind, int options_device) {
+int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::full_like(*self, *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, scalar fill_value) {
+int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::full_out(*out, torch::IntArrayRef(size_data, size_len), *fill_value);
+    auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse_grad) {
+int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::gather(*self, dim, *index, (bool)sparse_grad);
+    auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad) {
+int atg_avg_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::gather_out(*out, *self, dim, *index, (bool)sparse_grad);
+    auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge(tensor *out__, tensor self, scalar other) {
+int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) {
   PROTECT(
-    auto outputs__ = torch::ge(*self, *other);
+    auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge1(tensor *out__, tensor self, tensor other) {
+int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) {
   PROTECT(
-    auto outputs__ = torch::ge(*self, *other);
+    auto outputs__ = torch::baddbmm(*self, *batch1, *batch2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge_(tensor *out__, tensor self, scalar other) {
+int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) {
   PROTECT(
-    auto outputs__ = self->ge_(*other);
+    auto outputs__ = self->baddbmm_(*batch1, *batch2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge_1(tensor *out__, tensor self, tensor other) {
+int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) {
   PROTECT(
-    auto outputs__ = self->ge_(*other);
+    auto outputs__ = torch::baddbmm_out(*out, *self, *batch1, *batch2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::ge_out(*out, *self, *other);
+    auto outputs__ = torch::bartlett_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ge_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_bartlett_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::ge_out(*out, *self, *other);
+    auto outputs__ = torch::bartlett_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gelu(tensor *out__, tensor self) {
+int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled) {
   PROTECT(
-    auto outputs__ = torch::gelu(*self);
+    auto outputs__ = torch::batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps, (bool)cudnn_enabled);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gelu_backward(tensor *out__, tensor grad, tensor self) {
+int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu, tensor count) {
   PROTECT(
-    auto outputs__ = torch::gelu_backward(*grad, *self);
+    auto outputs__ = torch::batch_norm_backward_elemt(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), *mean_dy, *mean_dy_xmu, *count);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
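Note: factory wrappers such as atg_bartlett_window above encode torch::TensorOptions as a plain (options_kind, options_device) integer pair and rebuild it with at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)). The sketch below is a rough reading aid for that decoding; device_of_int is defined elsewhere in the wrapper sources, so its CPU/CUDA encoding here is an assumption rather than the shipped implementation.

#include <torch/torch.h>

// Assumed encoding: a negative index selects CPU, otherwise a CUDA device index.
static torch::Device device_of_int(int d) {
  if (d < 0) return torch::Device(at::kCPU);
  return torch::Device(at::kCUDA, static_cast<int8_t>(d));
}

// Mirrors the generated call: two plain ints become a full at::TensorOptions.
torch::Tensor make_bartlett(int64_t window_length, int options_kind, int options_device) {
  return torch::bartlett_window(
      window_length,
      at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
}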
-int atg_geometric_(tensor *out__, tensor self, double p) {
+int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g) {
   PROTECT(
-    auto outputs__ = self->geometric_(p);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::batch_norm_backward_reduce(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), (bool)input_g, (bool)weight_g, (bool)bias_g);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    out__[3] = new torch::Tensor(std::get<3>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_geqrf(tensor *out__, tensor self) {
+int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) {
   PROTECT(
-    auto outputs__ = torch::geqrf(*self);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::batch_norm_elemt(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_geqrf_out(tensor *out__, tensor a, tensor tau, tensor self) {
+int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) {
   PROTECT(
-    auto outputs__ = torch::geqrf_out(*a, *tau, *self);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::batch_norm_elemt_out(*out, *input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ger(tensor *out__, tensor self, tensor vec2) {
+int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count) {
   PROTECT(
-    auto outputs__ = torch::ger(*self, *vec2);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::batch_norm_gather_stats(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, count);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) {
+int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, tensor counts) {
   PROTECT(
-    auto outputs__ = torch::ger_out(*out, *self, *vec2);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, *counts);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_glu(tensor *out__, tensor self, int64_t dim) {
+int atg_batch_norm_stats(tensor *out__, tensor input, double eps) {
   PROTECT(
-    auto outputs__ = torch::glu(*self, dim);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::batch_norm_stats(*input, eps);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim) {
+int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean, tensor running_var, double momentum) {
   PROTECT(
-    auto outputs__ = torch::glu_backward(*grad_output, *self, dim);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::batch_norm_update_stats(*input, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_glu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t dim) {
+int atg_bernoulli(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::glu_backward_out(*grad_input, *grad_output, *self, dim);
+    auto outputs__ = torch::bernoulli(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) {
+int atg_bernoulli_(tensor *out__, tensor self, tensor p) {
   PROTECT(
-    auto outputs__ = torch::glu_out(*out, *self, dim);
+    auto outputs__ = self->bernoulli_(*p);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_grad(tensor *out__, tensor self) {
+int atg_bernoulli_float_(tensor *out__, tensor self, double p) {
   PROTECT(
-    auto outputs__ = self->grad();
+    auto outputs__ = self->bernoulli_(p);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+int atg_bernoulli_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::grid_sampler(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+    auto outputs__ = torch::bernoulli_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+int atg_bernoulli_p(tensor *out__, tensor self, double p) {
   PROTECT(
-    auto outputs__ = torch::grid_sampler_2d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+    auto outputs__ = torch::bernoulli(*self, p);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
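Note: the batch-norm statistics wrappers in this span (atg_batch_norm_stats, atg_batch_norm_gather_stats, atg_batch_norm_update_stats) return a C++ std::tuple and unpack it into consecutive out__ slots with std::get<i>, so the caller must allocate one slot per tuple element. A minimal caller-side sketch of that multi-result convention, with the torch_api.h header name again assumed:

#include "torch_api.h"  // assumed header

// atg_batch_norm_stats fills out[0] (mean) and out[1] (invstd).
int mean_and_invstd(tensor input, double eps, tensor *mean, tensor *invstd) {
  tensor out[2];  // one slot per tuple element
  if (atg_batch_norm_stats(out, input, eps) != 0)
    return 1;  // PROTECT trapped an exception
  *mean = out[0];    // std::get<0> on the C++ side
  *invstd = out[1];  // std::get<1>
  return 0;
}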
-int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, tensor bias) {
   PROTECT(
-    auto outputs__ = torch::grid_sampler_2d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::bilinear(*input1, *input2, *weight, (bias ? *bias : torch::Tensor()));
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::grid_sampler_3d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+    auto outputs__ = torch::binary_cross_entropy(*self, *target, (weight ? *weight : torch::Tensor()), reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::grid_sampler_3d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::binary_cross_entropy_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled) {
+int atg_binary_cross_entropy_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::group_norm(*input, num_groups, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enabled);
+    auto outputs__ = torch::binary_cross_entropy_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::binary_cross_entropy_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::binary_cross_entropy_with_logits(*self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::gru_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
+    auto outputs__ = torch::binary_cross_entropy_with_logits_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
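Note: the binary_cross_entropy* hunks above all lean on the nullable-tensor convention: a NULL tensor handle on the C side is translated to an undefined torch::Tensor() through expressions such as (weight ? *weight : torch::Tensor()). A small illustrative caller, again assuming the torch_api.h header name:

#include "torch_api.h"  // assumed header

// Passing nullptr for `weight` yields the unweighted loss, because the wrapper
// degrades a NULL handle to an undefined torch::Tensor().
int bce_unweighted(tensor self, tensor target, int64_t reduction, tensor *result) {
  tensor out[1];
  if (atg_binary_cross_entropy(out, self, target, /*weight=*/nullptr, reduction) != 0)
    return 1;
  *result = out[0];
  return 0;
}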
-int atg_gt(tensor *out__, tensor self, scalar other) {
+int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) {
   PROTECT(
-    auto outputs__ = torch::gt(*self, *other);
+    auto outputs__ = torch::bincount(*self, (weights ? *weights : torch::Tensor()), minlength);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gt1(tensor *out__, tensor self, tensor other) {
+int atg_binomial(tensor *out__, tensor count, tensor prob) {
   PROTECT(
-    auto outputs__ = torch::gt(*self, *other);
+    auto outputs__ = torch::binomial(*count, *prob);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gt_(tensor *out__, tensor self, scalar other) {
+int atg_bitwise_and(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = self->gt_(*other);
+    auto outputs__ = torch::bitwise_and(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gt_1(tensor *out__, tensor self, tensor other) {
+int atg_bitwise_and_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = self->gt_(*other);
+    auto outputs__ = self->bitwise_and_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gt_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_bitwise_and_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::gt_out(*out, *self, *other);
+    auto outputs__ = torch::bitwise_and_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_gt_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_bitwise_and_tensor(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::gt_out(*out, *self, *other);
+    auto outputs__ = torch::bitwise_and(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+int atg_bitwise_and_tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hamming_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = self->bitwise_and_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hamming_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+int atg_bitwise_and_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hamming_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::bitwise_and_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hamming_window2(tensor *out__, int64_t window_length, int periodic, double alpha, int options_kind, int options_device) {
+int atg_bitwise_left_shift(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::bitwise_left_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hamming_window3(tensor *out__, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device) {
+int atg_bitwise_left_shift_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = self->bitwise_left_shift_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+int atg_bitwise_left_shift_scalar_tensor(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hann_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::bitwise_left_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hann_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+int atg_bitwise_left_shift_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hann_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::bitwise_left_shift_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardshrink(tensor *out__, tensor self) {
+int atg_bitwise_left_shift_tensor_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::hardshrink(*self);
+    auto outputs__ = torch::bitwise_left_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar lambd) {
+int atg_bitwise_left_shift_tensor_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::hardshrink_backward(*grad_out, *self, *lambd);
+    auto outputs__ = self->bitwise_left_shift_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardtanh(tensor *out__, tensor self) {
+int atg_bitwise_left_shift_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::hardtanh(*self);
+    auto outputs__ = torch::bitwise_left_shift_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardtanh_(tensor *out__, tensor self) {
+int atg_bitwise_not(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::hardtanh_(*self);
+    auto outputs__ = torch::bitwise_not(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+int atg_bitwise_not_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::hardtanh_backward(*grad_output, *self, *min_val, *max_val);
+    auto outputs__ = self->bitwise_not_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardtanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::hardtanh_backward_out(*grad_input, *grad_output, *self, *min_val, *max_val);
+    auto outputs__ = torch::bitwise_not_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hardtanh_out(tensor *out__, tensor out, tensor self) {
+int atg_bitwise_or(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::hardtanh_out(*out, *self);
+    auto outputs__ = torch::bitwise_or(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) {
+int atg_bitwise_or_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::hinge_embedding_loss(*self, *target, margin, reduction);
+    auto outputs__ = self->bitwise_or_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_histc(tensor *out__, tensor self, int64_t bins) {
+int atg_bitwise_or_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::histc(*self, bins);
+    auto outputs__ = torch::bitwise_or_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) {
+int atg_bitwise_or_tensor(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::histc_out(*out, *self, bins);
+    auto outputs__ = torch::bitwise_or(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) {
+int atg_bitwise_or_tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hspmm(*mat1, *mat2);
+    auto outputs__ = self->bitwise_or_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) {
+int atg_bitwise_or_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::hspmm_out(*out, *mat1, *mat2);
+    auto outputs__ = torch::bitwise_or_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ifft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) {
+int atg_bitwise_right_shift(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::ifft(*self, signal_ndim, (bool)normalized);
+    auto outputs__ = torch::bitwise_right_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+int atg_bitwise_right_shift_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::im2col(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+    auto outputs__ = self->bitwise_right_shift_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+int atg_bitwise_right_shift_scalar_tensor(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::im2col_backward(*grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+    auto outputs__ = torch::bitwise_right_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_im2col_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+int atg_bitwise_right_shift_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::im2col_backward_out(*grad_input, *grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+    auto outputs__ = torch::bitwise_right_shift_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+int atg_bitwise_right_shift_tensor_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::im2col_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+    auto outputs__ = torch::bitwise_right_shift(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_imag(tensor *out__, tensor self) {
+int atg_bitwise_right_shift_tensor_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::imag(*self);
+    auto outputs__ = self->bitwise_right_shift_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_imag_out(tensor *out__, tensor out, tensor self) {
+int atg_bitwise_right_shift_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::imag_out(*out, *self);
+    auto outputs__ = torch::bitwise_right_shift_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len) {
+int atg_bitwise_xor(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::index(*self, of_carray_tensor(indices_data, indices_len));
+    auto outputs__ = torch::bitwise_xor(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::index_add(*self, dim, *index, *source);
+    auto outputs__ = self->bitwise_xor_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+int atg_bitwise_xor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = self->index_add_(dim, *index, *source);
+    auto outputs__ = torch::bitwise_xor_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+int atg_bitwise_xor_tensor(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::index_copy(*self, dim, *index, *source);
+    auto outputs__ = torch::bitwise_xor(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+int atg_bitwise_xor_tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = self->index_copy_(dim, *index, *source);
+    auto outputs__ = self->bitwise_xor_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+int atg_bitwise_xor_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::index_fill(*self, dim, *index, *value);
+    auto outputs__ = torch::bitwise_xor_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_fill1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::index_fill(*self, dim, *index, *value);
+    auto outputs__ = torch::blackman_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+int atg_blackman_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = self->index_fill_(dim, *index, *value);
+    auto outputs__ = torch::blackman_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_fill_1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+int atg_block_diag(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = self->index_fill_(dim, *index, *value);
+    auto outputs__ = torch::block_diag(of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+int atg_bmm(tensor *out__, tensor self, tensor mat2) {
   PROTECT(
-    auto outputs__ = torch::index_put(*self, of_carray_tensor(indices_data, indices_len), *values, (bool)accumulate);
+    auto outputs__ = torch::bmm(*self, *mat2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) {
   PROTECT(
-    auto outputs__ = torch::index_put_(*self, of_carray_tensor(indices_data, indices_len), *values, (bool)accumulate);
+    auto outputs__ = torch::bmm_out(*out, *self, *mat2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) {
+int atg_broadcast_tensors(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::index_select(*self, dim, *index);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::broadcast_tensors(of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }
-int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) {
+int atg_broadcast_to(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
-    auto outputs__ = torch::index_select_out(*out, *self, dim, *index);
+    auto outputs__ = torch::broadcast_to(*self, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_indices(tensor *out__, tensor self) {
+int atg_bucketize(tensor *out__, tensor self, tensor boundaries, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = self->indices();
+    auto outputs__ = torch::bucketize(*self, *boundaries, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) {
+int atg_bucketize_scalar(tensor *out__, scalar self, tensor boundaries, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = torch::instance_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)use_input_stats, momentum, eps, (bool)cudnn_enabled);
+    auto outputs__ = torch::bucketize(*self, *boundaries, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_int_repr(tensor *out__, tensor self) {
+int atg_bucketize_tensor_out(tensor *out__, tensor out, tensor self, tensor boundaries, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = torch::int_repr(*self);
+    auto outputs__ = torch::bucketize_out(*out, *self, *boundaries, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_inverse(tensor *out__, tensor self) {
+int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::inverse(*self);
+    auto outputs__ = torch::cartesian_prod(of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_inverse_out(tensor *out__, tensor out, tensor self) {
+int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::inverse_out(*out, *self);
+    auto outputs__ = torch::cat(of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_irfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len) {
+int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::irfft(*self, signal_ndim, (bool)normalized, (bool)onesided, torch::IntArrayRef(signal_sizes_data, signal_sizes_len));
+    auto outputs__ = torch::cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double atol, int equal_nan) {
+int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) {
   PROTECT(
-    auto outputs__ = torch::isclose(*self, *other, rtol, atol, (bool)equal_nan);
+    auto outputs__ = self->cauchy_(median, sigma);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_isfinite(tensor *out__, tensor self) {
+int atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode) {
   PROTECT(
-    auto outputs__ = torch::isfinite(*self);
+    auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_isnan(tensor *out__, tensor self) {
+int atg_ceil(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::isnan(*self);
+    auto outputs__ = torch::ceil(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_ceil_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::kl_div(*self, *target, reduction);
+    auto outputs__ = torch::ceil_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_ceil_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction);
+    auto outputs__ = torch::ceil_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim) {
+int atg_celu(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::kthvalue(*self, k, dim, (bool)keepdim);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::celu(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_kthvalue_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim) {
+int atg_celu_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::kthvalue_out(*values, *indices, *self, k, dim, (bool)keepdim);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::celu_(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) {
   PROTECT(
-    auto outputs__ = torch::l1_loss(*self, *target, reduction);
+    auto outputs__ = torch::chain_matmul(of_carray_tensor(matrices_data, matrices_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_chain_matmul_out(tensor *out__, tensor out, tensor *matrices_data, int matrices_len) {
   PROTECT(
-    auto outputs__ = torch::l1_loss_backward(*grad_output, *self, *target, reduction);
+    auto outputs__ = torch::chain_matmul_out(*out, of_carray_tensor(matrices_data, matrices_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_channel_shuffle(tensor *out__, tensor self, int64_t groups) {
   PROTECT(
-    auto outputs__ = torch::l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
+    auto outputs__ = torch::channel_shuffle(*self, groups);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+int atg_cholesky(tensor *out__, tensor self, int upper) {
   PROTECT(
-    auto outputs__ = torch::l1_loss_out(*out, *self, *target, reduction);
+    auto outputs__ = torch::cholesky(*self, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable) {
+int atg_cholesky_inverse(tensor *out__, tensor self, int upper) {
   PROTECT(
-    auto outputs__ = torch::layer_norm(*input, torch::IntArrayRef(normalized_shape_data, normalized_shape_len), (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enable);
+    auto outputs__ = torch::cholesky_inverse(*self, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_le(tensor *out__, tensor self, scalar other) {
+int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int upper) {
   PROTECT(
-    auto outputs__ = torch::le(*self, *other);
+    auto outputs__ = torch::cholesky_inverse_out(*out, *self, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_le1(tensor *out__, tensor self, tensor other) {
+int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) {
   PROTECT(
-    auto outputs__ = torch::le(*self, *other);
+    auto outputs__ = torch::cholesky_out(*out, *self, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_le_(tensor *out__, tensor self, scalar other) {
+int atg_cholesky_solve(tensor *out__, tensor self, tensor input2, int upper) {
   PROTECT(
-    auto outputs__ = self->le_(*other);
+    auto outputs__ = torch::cholesky_solve(*self, *input2, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_le_1(tensor *out__, tensor self, tensor other) {
+int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2, int upper) {
   PROTECT(
-    auto outputs__ = self->le_(*other);
+    auto outputs__ = torch::cholesky_solve_out(*out, *self, *input2, (bool)upper);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_le_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_choose_qparams_optimized(tensor *out__, tensor input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
   PROTECT(
-    auto outputs__ = torch::le_out(*out, *self, *other);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::choose_qparams_optimized(*input, numel, n_bins, ratio, bit_width);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_le_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::le_out(*out, *self, *other);
+    auto outputs__ = torch::chunk(*self, chunks, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
+    return 0;
+  )
+  return 1;
+}
+
+int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) {
+  PROTECT(
+    auto outputs__ = torch::clamp(*self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_leaky_relu(tensor *out__, tensor self) {
+int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) {
   PROTECT(
-    auto outputs__ = torch::leaky_relu(*self);
+    auto outputs__ = torch::clamp_(*self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_leaky_relu_(tensor *out__, tensor self) {
+int atg_clamp_max(tensor *out__, tensor self, scalar max) {
   PROTECT(
-    auto outputs__ = torch::leaky_relu_(*self);
+    auto outputs__ = torch::clamp_max(*self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scalar negative_slope) {
+int atg_clamp_max_(tensor *out__, tensor self, scalar max) {
   PROTECT(
-    auto outputs__ = torch::leaky_relu_backward(*grad_output, *self, *negative_slope);
+    auto outputs__ = torch::clamp_max_(*self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_leaky_relu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope) {
+int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) {
   PROTECT(
-    auto outputs__ = torch::leaky_relu_backward_out(*grad_input, *grad_output, *self, *negative_slope);
+    auto outputs__ = torch::clamp_max_out(*out, *self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) {
+int atg_clamp_max_tensor(tensor *out__, tensor self, tensor max) {
   PROTECT(
-    auto outputs__ = torch::leaky_relu_out(*out, *self);
+    auto outputs__ = torch::clamp_max(*self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) {
+int atg_clamp_max_tensor_(tensor *out__, tensor self, tensor max) {
   PROTECT(
-    auto outputs__ = torch::lerp(*self, *end, *weight);
+    auto outputs__ = torch::clamp_max_(*self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp1(tensor *out__, tensor self, tensor end, tensor weight) {
+int atg_clamp_max_tensor_out(tensor *out__, tensor out, tensor self, tensor max) {
   PROTECT(
-    auto outputs__ = torch::lerp(*self, *end, *weight);
+    auto outputs__ = torch::clamp_max_out(*out, *self, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) {
+int atg_clamp_min(tensor *out__, tensor self, scalar min) {
   PROTECT(
-    auto outputs__ = self->lerp_(*end, *weight);
+    auto outputs__ = torch::clamp_min(*self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp_1(tensor *out__, tensor self, tensor end, tensor weight) {
+int atg_clamp_min_(tensor *out__, tensor self, scalar min) {
   PROTECT(
-    auto outputs__ = self->lerp_(*end, *weight);
+    auto outputs__ = torch::clamp_min_(*self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp_out(tensor *out__, tensor out, tensor self, tensor end, scalar weight) {
+int atg_clamp_min_out(tensor *out__, tensor out, tensor self, scalar min) {
   PROTECT(
-    auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
+    auto outputs__ = torch::clamp_min_out(*out, *self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lerp_out1(tensor *out__, tensor out, tensor self, tensor end, tensor weight) {
+int atg_clamp_min_tensor(tensor *out__, tensor self, tensor min) {
   PROTECT(
-    auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
+    auto outputs__ = torch::clamp_min(*self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lgamma(tensor *out__, tensor self) {
+int atg_clamp_min_tensor_(tensor *out__, tensor self, tensor min) {
   PROTECT(
-    auto outputs__ = torch::lgamma(*self);
+    auto outputs__ = torch::clamp_min_(*self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lgamma_(tensor *out__, tensor self) {
+int atg_clamp_min_tensor_out(tensor *out__, tensor out, tensor self, tensor min) {
   PROTECT(
-    auto outputs__ = self->lgamma_();
+    auto outputs__ = torch::clamp_min_out(*out, *self, *min);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_lgamma_out(tensor *out__, tensor out, tensor self) {
+int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) {
   PROTECT(
-    auto outputs__ = torch::lgamma_out(*out, *self);
+    auto outputs__ = torch::clamp_out(*out, *self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
+int atg_clamp_tensor(tensor *out__, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::linear(*input, *weight, (bias ? *bias : torch::Tensor()));
+    auto outputs__ = torch::clamp(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) {
+int atg_clamp_tensor_(tensor *out__, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::clamp_(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) {
+int atg_clamp_tensor_out(tensor *out__, tensor out, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::linspace_out(*out, *start, *end, steps);
+    auto outputs__ = torch::clamp_out(*out, *self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log(tensor *out__, tensor self) {
+int atg_clip(tensor *out__, tensor self, scalar min, scalar max) {
   PROTECT(
-    auto outputs__ = torch::log(*self);
+    auto outputs__ = torch::clip(*self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log10(tensor *out__, tensor self) {
+int atg_clip_(tensor *out__, tensor self, scalar min, scalar max) {
   PROTECT(
-    auto outputs__ = torch::log10(*self);
+    auto outputs__ = torch::clip_(*self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log10_(tensor *out__, tensor self) {
+int atg_clip_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) {
   PROTECT(
-    auto outputs__ = torch::log10_(*self);
+    auto outputs__ = torch::clip_out(*out, *self, *min, *max);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log10_out(tensor *out__, tensor out, tensor self) {
+int atg_clip_tensor(tensor *out__, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::log10_out(*out, *self);
+    auto outputs__ = torch::clip(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log1p(tensor *out__, tensor self) {
+int atg_clip_tensor_(tensor *out__, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::log1p(*self);
+    auto outputs__ = torch::clip_(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log1p_(tensor *out__, tensor self) {
+int atg_clip_tensor_out(tensor *out__, tensor out, tensor self, tensor min, tensor max) {
   PROTECT(
-    auto outputs__ = torch::log1p_(*self);
+    auto outputs__ = torch::clip_out(*out, *self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log1p_out(tensor *out__, tensor out, tensor self) {
+int atg_clone(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::log1p_out(*out, *self);
+    auto outputs__ = torch::clone(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log2(tensor *out__, tensor self) {
+int atg_coalesce(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::log2(*self);
+    auto outputs__ = self->coalesce();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log2_(tensor *out__, tensor self) {
+int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
   PROTECT(
-    auto outputs__ = torch::log2_(*self);
+    auto outputs__ = torch::col2im(*self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log2_out(tensor *out__, tensor out, tensor self) {
+int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
   PROTECT(
-    auto outputs__ = torch::log2_out(*out, *self);
+    auto outputs__ = torch::col2im_backward(*grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log_(tensor *out__, tensor self) {
+int atg_col2im_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
   PROTECT(
-    auto outputs__ = torch::log_(*self);
+    auto outputs__ = torch::col2im_backward_out(*grad_input, *grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
+int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
   PROTECT(
-    auto outputs__ = self->log_normal_(mean, std);
+    auto outputs__ = torch::col2im_out(*out, *self,
torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_out(tensor *out__, tensor out, tensor self) { +int atg_col_indices(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::log_out(*out, *self); + auto outputs__ = self->col_indices(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_sigmoid(tensor *out__, tensor self) { +int atg_column_stack(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::log_sigmoid(*self); + auto outputs__ = torch::column_stack(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, tensor buffer) { +int atg_column_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::log_sigmoid_backward(*grad_output, *self, *buffer); + auto outputs__ = torch::column_stack_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor buffer) { +int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement) { PROTECT( - auto outputs__ = torch::log_sigmoid_backward_out(*grad_input, *grad_output, *self, *buffer); + auto outputs__ = torch::combinations(*self, r, (bool)with_replacement); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) { +int atg_complex(tensor *out__, tensor real, tensor imag) { PROTECT( - auto outputs__ = torch::log_sigmoid_out(*out, *self); + auto outputs__ = torch::complex(*real, *imag); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { +int atg_complex_out(tensor *out__, tensor out, tensor real, tensor imag) { PROTECT( - auto outputs__ = torch::log_softmax(*self, dim, torch::ScalarType(dtype)); + auto outputs__ = torch::complex_out(*out, *real, *imag); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logdet(tensor *out__, tensor self) { +int atg_concat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( - auto outputs__ = torch::logdet(*self); + auto outputs__ = torch::concat(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_not(tensor *out__, tensor self) { +int atg_concat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( - auto outputs__ = torch::logical_not(*self); + auto outputs__ = torch::concat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_not_(tensor *out__, tensor self) { +int atg_conj(tensor *out__, tensor self) { PROTECT( - auto outputs__ = self->logical_not_(); + auto outputs__ = torch::conj(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_not_out(tensor *out__, tensor out, tensor self) { +int atg_conj_physical(tensor *out__, tensor self) { PROTECT( - auto 
outputs__ = torch::logical_not_out(*out, *self); + auto outputs__ = torch::conj_physical(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_xor(tensor *out__, tensor self, tensor other) { +int atg_conj_physical_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::logical_xor(*self, *other); + auto outputs__ = torch::conj_physical_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_xor_(tensor *out__, tensor self, tensor other) { +int atg_conj_physical_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = self->logical_xor_(*other); + auto outputs__ = torch::conj_physical_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_len) { PROTECT( - auto outputs__ = torch::logical_xor_out(*out, *self, *other); + auto outputs__ = torch::constant_pad_nd(*self, torch::IntArrayRef(pad_data, pad_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) { +int atg_contiguous(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = self->contiguous(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) { +int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base); + auto outputs__ = torch::conv1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +int atg_conv1d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::conv1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::conv2d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) { +int atg_conv2d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::conv2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) { +int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::conv3d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) { +int atg_conv3d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( - auto outputs__ = torch::lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor())); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::conv3d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lstsq(tensor *out__, tensor self, tensor A) { +int atg_conv_depthwise3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::lstsq(*self, *A); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::conv_depthwise3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lstsq_out(tensor *out__, tensor X, tensor qr, tensor self, tensor A) { +int atg_conv_depthwise3d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::lstsq_out(*X, *qr, *self, *A); + auto outputs__ = torch::conv_depthwise3d_backward_out(*grad_input, *grad_weight, *grad_bias, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_lt(tensor *out__, tensor self, scalar other) { +int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) { PROTECT( - auto outputs__ = torch::lt(*self, *other); + auto outputs__ = torch::conv_tbc(*self, *weight, *bias, pad); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lt1(tensor *out__, tensor self, tensor other) { +int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weight, tensor bias, int64_t pad) { PROTECT( - auto outputs__ = torch::lt(*self, *other); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::conv_tbc_backward(*self, *input, *weight, *bias, pad); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); return 0; ) return 1; } -int atg_lt_(tensor *out__, tensor self, scalar other) { +int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = self->lt_(*other); + auto outputs__ = torch::conv_transpose1d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lt_1(tensor *out__, tensor self, tensor other) { +int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = self->lt_(*other); + auto outputs__ = torch::conv_transpose2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lt_out(tensor *out__, tensor out, tensor self, scalar other) { +int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::lt_out(*out, *self, *other); + auto outputs__ = torch::conv_transpose3d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lt_out1(tensor *out__, tensor out, tensor self, tensor other) { +int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( - auto outputs__ = torch::lt_out(*out, *self, *other); + auto outputs__ = torch::convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) { +int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( - auto outputs__ = torch::lu_solve(*self, *LU_data, *LU_pivots); + auto outputs__ = torch::convolution_overrideable(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, tensor LU_pivots) { +int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_blocking) { PROTECT( - auto outputs__ = torch::lu_solve_out(*out, *self, *LU_data, *LU_pivots); + auto outputs__ = torch::copy_sparse_to_sparse_(*self, *src, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { +int atg_copysign(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::margin_ranking_loss(*input1, *input2, *target, margin, reduction); + auto outputs__ = torch::copysign(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) { +int atg_copysign_(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::masked_fill(*self, *mask, *value); + auto outputs__ = self->copysign_(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_fill1(tensor *out__, tensor self, tensor mask, tensor value) { +int atg_copysign_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::masked_fill(*self, *mask, *value); + auto outputs__ = torch::copysign_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) { +int atg_copysign_scalar(tensor *out__, tensor self, scalar other) { PROTECT( - auto outputs__ = self->masked_fill_(*mask, *value); + auto outputs__ = torch::copysign(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_fill_1(tensor *out__, tensor self, tensor mask, tensor value) { +int atg_copysign_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( - auto outputs__ = self->masked_fill_(*mask, *value); + auto outputs__ = self->copysign_(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) { +int atg_copysign_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( - auto outputs__ = torch::masked_scatter(*self, *mask, *source); + auto outputs__ = torch::copysign_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) { +int atg_corrcoef(tensor *out__, tensor self) { PROTECT( - auto outputs__ = self->masked_scatter_(*mask, *source); + auto outputs__ = torch::corrcoef(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_select(tensor *out__, tensor self, tensor mask) { +int atg_cos(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::masked_select(*self, *mask); + auto outputs__ = torch::cos(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { +int atg_cos_(tensor *out__, tensor self) { PROTECT( - 
auto outputs__ = torch::masked_select_out(*out, *self, *mask); + auto outputs__ = torch::cos_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_matmul(tensor *out__, tensor self, tensor other) { +int atg_cos_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::matmul(*self, *other); + auto outputs__ = torch::cos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg_cosh(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::matmul_out(*out, *self, *other); + auto outputs__ = torch::cosh(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_matrix_power(tensor *out__, tensor self, int64_t n) { +int atg_cosh_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::matrix_power(*self, n); + auto outputs__ = torch::cosh_(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cosh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::cosh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { + PROTECT( + auto outputs__ = torch::cosine_embedding_loss(*input1, *input2, *target, margin, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, double eps) { + PROTECT( + auto outputs__ = torch::cosine_similarity(*x1, *x2, dim, eps); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cov(tensor *out__, tensor self, int64_t correction, tensor fweights, tensor aweights) { + PROTECT( + auto outputs__ = torch::cov(*self, correction, (fweights ? *fweights : torch::Tensor()), (aweights ? *aweights : torch::Tensor())); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { + PROTECT( + auto outputs__ = torch::cross(*self, *other, dim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cross_entropy_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, double label_smoothing) { + PROTECT( + auto outputs__ = torch::cross_entropy_loss(*self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index, label_smoothing); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { + PROTECT( + auto outputs__ = torch::cross_out(*out, *self, *other, dim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_crow_indices(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->crow_indices(); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity) { + PROTECT( + auto outputs__ = torch::ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, reduction, (bool)zero_infinity); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ctc_loss_tensor(tensor *out__, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity) { + PROTECT( + auto outputs__ = torch::ctc_loss(*log_probs, *targets, *input_lengths, *target_lengths, blank, reduction, (bool)zero_infinity); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W) { + PROTECT( + auto outputs__ = torch::cudnn_affine_grid_generator(*theta, n, C, H, W); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W) { + PROTECT( + auto outputs__ = torch::cudnn_affine_grid_generator_backward(*grad, n, C, H, W); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { + PROTECT( + auto outputs__ = torch::cudnn_batch_norm(*input, *weight, (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + return 0; +) +return 1; +} + +int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace) { + PROTECT( + auto outputs__ = torch::cudnn_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? 
*save_var : torch::Tensor()), epsilon, *reserveSpace); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_add_relu(tensor *out__, tensor self, tensor weight, tensor z, scalar alpha, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_add_relu(*self, *weight, *z, *alpha, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::cudnn_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::cudnn_convolution(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_relu(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_relu(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + 
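/* All atg_* wrappers visible here share one calling convention: the caller
   passes a pre-allocated `tensor out__[n]` result array, the body runs inside
   PROTECT, and the function returns 0 on success and 1 when an exception was
   caught (assumption: PROTECT is the try/catch helper from the hand-written
   part of the wrapper, as its use above suggests). A minimal caller-side
   sketch under that assumption:

     tensor out[1];
     if (atg_cos(out, self) != 0) {
       // non-zero: a C++ exception was caught inside PROTECT
     } else {
       // out[0] now points at a heap-allocated torch::Tensor that the
       // caller must eventually release
     }
*/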
out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_transpose_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_convolution_transpose_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { + PROTECT( + auto outputs__ = torch::cudnn_grid_sampler(*self, *grid); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, tensor grad_output) { + PROTECT( + auto outputs__ = torch::cudnn_grid_sampler_backward(*self, *grid, *grad_output); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_cummax(tensor *out__, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummax(*self, dim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_cummax_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummax_out(*values, *indices, *self, dim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_cummaxmin_backward(tensor *out__, tensor grad, tensor input, tensor indices, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummaxmin_backward(*grad, *input, *indices, dim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cummin(tensor *out__, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummin(*self, dim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_cummin_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummin_out(*values, *indices, *self, dim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_cumprod(tensor 
*out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::cumprod(*self, dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumprod_(tensor *out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = self->cumprod_(dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumprod_backward(tensor *out__, tensor grad, tensor input, int64_t dim, tensor output) { + PROTECT( + auto outputs__ = torch::cumprod_backward(*grad, *input, dim, *output); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::cumprod_out(*out, *self, dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::cumsum(*self, dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumsum_(tensor *out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = self->cumsum_(dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::cumsum_out(*out, *self, dim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumulative_trapezoid(tensor *out__, tensor y, int64_t dim) { + PROTECT( + auto outputs__ = torch::cumulative_trapezoid(*y, dim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_cumulative_trapezoid_x(tensor *out__, tensor y, tensor x, int64_t dim) { + PROTECT( + auto outputs__ = torch::cumulative_trapezoid(*y, *x, dim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_data(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->data(); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_deg2rad(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_deg2rad_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad_(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_deg2rad_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dequantize(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::dequantize(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dequantize_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::dequantize(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; + return 0; +) +return 1; +} + +int atg_det(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::det(*self); + out__[0] = new torch::Tensor(outputs__); + 
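/* Variable-length results (atg_dequantize_tensors above, atg_dsplit below)
   reuse the same out__ array but write one slot per returned tensor followed
   by a terminating nullptr, so the caller must reserve count + 1 slots.
   A sketch, assuming the caller can bound the count in advance (for
   dequantize, the input list length is such a bound):

     tensor outs[MAX_OUT + 1];  // MAX_OUT: caller-chosen upper bound
     if (atg_dequantize_tensors(outs, tensors, tensors_len) == 0) {
       for (int i = 0; outs[i] != nullptr; ++i) {
         // consume outs[i]
       }
     }
*/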
return 0; +) +return 1; +} + +int atg_detach(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::detach(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_detach_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::detach_(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diag(tensor *out__, tensor self, int64_t diagonal) { + PROTECT( + auto outputs__ = torch::diag(*self, diagonal); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diag_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t diagonal) { + PROTECT( + auto outputs__ = torch::diag_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), diagonal); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { + PROTECT( + auto outputs__ = torch::diag_embed(*self, offset, dim1, dim2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { + PROTECT( + auto outputs__ = torch::diag_out(*out, *self, diagonal); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diagflat(tensor *out__, tensor self, int64_t offset) { + PROTECT( + auto outputs__ = torch::diagflat(*self, offset); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { + PROTECT( + auto outputs__ = torch::diagonal(*self, offset, dim1, dim2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diagonal_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2) { + PROTECT( + auto outputs__ = torch::diagonal_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), offset, dim1, dim2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diff(tensor *out__, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append) { + PROTECT( + auto outputs__ = torch::diff(*self, n, dim, (prepend ? *prepend : torch::Tensor()), (append ? *append : torch::Tensor())); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_diff_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append) { + PROTECT( + auto outputs__ = torch::diff_out(*out, *self, n, dim, (prepend ? *prepend : torch::Tensor()), (append ? 
*append : torch::Tensor())); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_digamma(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::digamma(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_digamma_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->digamma_(); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_digamma_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::digamma_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dist(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::dist(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::div(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->div_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::div_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_out_mode(tensor *out__, tensor out, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::div_out(*out, *self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_scalar(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::div(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_scalar_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->div_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_scalar_mode(tensor *out__, tensor self, scalar other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::div(*self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_scalar_mode_(tensor *out__, tensor self, scalar other, char * rounding_mode) { + PROTECT( + auto outputs__ = self->div_(*other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_tensor_mode(tensor *out__, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::div(*self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_div_tensor_mode_(tensor *out__, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = self->div_(*other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::divide(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->divide_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = 
torch::divide_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_out_mode(tensor *out__, tensor out, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::divide_out(*out, *self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_scalar(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::divide(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_scalar_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->divide_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_scalar_mode(tensor *out__, tensor self, scalar other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::divide(*self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_scalar_mode_(tensor *out__, tensor self, scalar other, char * rounding_mode) { + PROTECT( + auto outputs__ = self->divide_(*other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_tensor_mode(tensor *out__, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = torch::divide(*self, *other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_divide_tensor_mode_(tensor *out__, tensor self, tensor other, char * rounding_mode) { + PROTECT( + auto outputs__ = self->divide_(*other, std::string(rounding_mode)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dot(tensor *out__, tensor self, tensor tensor) { + PROTECT( + auto outputs__ = torch::dot(*self, *tensor); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { + PROTECT( + auto outputs__ = torch::dot_out(*out, *self, *tensor); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dropout(tensor *out__, tensor input, double p, int train) { + PROTECT( + auto outputs__ = torch::dropout(*input, p, (bool)train); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dropout_(tensor *out__, tensor self, double p, int train) { + PROTECT( + auto outputs__ = torch::dropout_(*self, p, (bool)train); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dsplit(tensor *out__, tensor self, int64_t sections) { + PROTECT( + auto outputs__ = torch::dsplit(*self, sections); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; + return 0; +) +return 1; +} + +int atg_dsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) { + PROTECT( + auto outputs__ = torch::dsplit(*self, torch::IntArrayRef(indices_data, indices_len)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; + return 0; +) +return 1; +} + +int atg_dstack(tensor *out__, tensor *tensors_data, int tensors_len) { 
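/* Tensor lists cross the C boundary as a (tensor*, int) pair and are rebuilt
   with of_carray_tensor before the libtorch call. A sketch of the helper's
   likely shape (assumptions: the real definition lives in the hand-written
   torch_api.cpp rather than this generated file, and `tensor` is an alias
   for torch::Tensor*):

     std::vector<torch::Tensor> of_carray_tensor(tensor *vs, int len) {
       std::vector<torch::Tensor> result;
       for (int i = 0; i < len; ++i) result.push_back(*(vs[i]));
       return result;
     }
*/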
+ PROTECT( + auto outputs__ = torch::dstack(of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_dstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::dstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_eig(tensor *out__, tensor self, int eigenvectors) { + PROTECT( + auto outputs__ = torch::eig(*self, (bool)eigenvectors); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_eig_e(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors) { + PROTECT( + auto outputs__ = torch::eig_out(*e, *v, *self, (bool)eigenvectors); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_einsum(tensor *out__, char * equation, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::einsum(std::string(equation), of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_elu(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::elu(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_elu_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::elu_(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result) { + PROTECT( + auto outputs__ = torch::elu_backward(*grad_output, *alpha, *scale, *input_scale, (bool)is_result, *self_or_result); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_elu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result) { + PROTECT( + auto outputs__ = torch::elu_backward_out(*grad_input, *grad_output, *alpha, *scale, *input_scale, (bool)is_result, *self_or_result); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_elu_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::elu_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse) { + PROTECT( + auto outputs__ = torch::embedding(*weight, *indices, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse) { + PROTECT( + auto outputs__ = torch::embedding_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset) { + PROTECT( + auto outputs__ = torch::embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, 
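/* Optional tensor arguments use a null C handle to mean "absent": the
   wrappers splice in a default-constructed torch::Tensor(), as in the
   (per_sample_weights ? *per_sample_weights : torch::Tensor()) argument that
   follows. Caller-side sketch (assumption: passing nullptr is the intended
   way to request the defaulted overload):

     atg_cov(out, self, /*correction=*/1,
             /*fweights=*/nullptr, /*aweights=*/nullptr);
*/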
+int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset) {
+ PROTECT(
+ auto outputs__ = torch::embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ out__[3] = new torch::Tensor(std::get<3>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_embedding_bag_padding_idx(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) {
+ PROTECT(
+ auto outputs__ = torch::embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset, padding_idx);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ out__[3] = new torch::Tensor(std::get<3>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) {
+ PROTECT(
+ auto outputs__ = torch::embedding_dense_backward(*grad_output, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max_norm, double norm_type) {
+ PROTECT(
+ auto outputs__ = torch::embedding_renorm_(*self, *indices, max_norm, norm_type);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) {
+ PROTECT(
+ auto outputs__ = torch::embedding_sparse_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
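+// NOTE (editorial sketch, not generator output): factory functions such as
+// atg_empty above encode torch::TensorOptions as two ints: options_kind is an
+// at::ScalarType value and options_device goes through the wrapper's
+// device_of_int helper. By the usual convention of these bindings a negative
+// device int means CPU and a non-negative one a CUDA ordinal, but that mapping
+// is an assumption about device_of_int, not part of this generated file:
+//
+//   int64_t shape[2] = {3, 4};
+//   tensor result[1];
+//   atg_empty(result, shape, 2, (int)at::ScalarType::Float, /*device=*/-1);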
+int atg_empty_like(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::empty_like(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+ PROTECT(
+ auto outputs__ = torch::empty_out(*out, torch::IntArrayRef(size_data, size_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_empty_quantized(tensor *out__, int64_t *size_data, int size_len, tensor qtensor, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::empty_quantized(torch::IntArrayRef(size_data, size_len), *qtensor, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::eq(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->eq_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::eq_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::eq(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->eq_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eq_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::eq_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erf(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erf(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erf_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erf_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erf_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erf_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfc(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erfc(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfc_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erfc_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfc_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erfc_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfinv(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erfinv(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfinv_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->erfinv_();
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_erfinv_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::erfinv_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp2(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp2(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp2_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp2_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp2_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp2_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exp_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::exp_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int implicit) {
+ PROTECT(
+ auto outputs__ = self->expand(torch::IntArrayRef(size_data, size_len), (bool)implicit);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_expand_as(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->expand_as(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_expm1(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::expm1(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_expm1_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::expm1_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_expm1_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::expm1_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_exponential_(tensor *out__, tensor self, double lambd) {
+ PROTECT(
+ auto outputs__ = self->exponential_(lambd);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::eye(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eye_m(tensor *out__, int64_t n, int64_t m, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::eye(n, m, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eye_m_out(tensor *out__, tensor out, int64_t n, int64_t m) {
+ PROTECT(
+ auto outputs__ = torch::eye_out(*out, n, m);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_eye_out(tensor *out__, tensor out, int64_t n) {
+ PROTECT(
+ auto outputs__ = torch::eye_out(*out, n);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_channel_affine_cachemask(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_channel_affine_cachemask(*self, *scale, *zero_point, axis, quant_min, quant_max);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_channel_affine_cachemask_backward(tensor *out__, tensor grad, tensor mask) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_channel_affine_cachemask_backward(*grad, *mask);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, scale, zero_point, quant_min, quant_max);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_tensor_affine_cachemask(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_tensor_affine_cachemask(*self, scale, zero_point, quant_min, quant_max);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_tensor_affine_cachemask_backward(tensor *out__, tensor grad, tensor mask) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_tensor_affine_cachemask_backward(*grad, *mask);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fake_quantize_per_tensor_affine_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max) {
+ PROTECT(
+ auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, *scale, *zero_point, quant_min, quant_max);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, tensor packed_weight, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_linear_fp16_weight(*input, *packed_weight, *bias);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, tensor packed_weight, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_linear_fp16_weight_fp32_activation(*input, *packed_weight, *bias);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_linear_int8_weight(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_linear_int8_weight_fp32_activation(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_pack_gemm_matrix_fp16(*input);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fbgemm_pack_quantized_matrix_kn(tensor *out__, tensor input, int64_t K, int64_t n) {
+ PROTECT(
+ auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input, K, n);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) {
+ PROTECT(
+ auto outputs__ = torch::feature_alpha_dropout(*input, p, (bool)train);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) {
+ PROTECT(
+ auto outputs__ = torch::feature_alpha_dropout_(*self, p, (bool)train);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_feature_dropout(tensor *out__, tensor input, double p, int train) {
+ PROTECT(
+ auto outputs__ = torch::feature_dropout(*input, p, (bool)train);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) {
+ PROTECT(
+ auto outputs__ = torch::feature_dropout_(*self, p, (bool)train);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fftfreq(tensor *out__, int64_t n, double d, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::fft_fftfreq(n, d, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fftfreq_out(tensor *out__, tensor out, int64_t n, double d) {
+ PROTECT(
+ auto outputs__ = torch::fft_fftfreq_out(*out, n, d);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_fftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_fftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
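+// NOTE (editorial sketch, not generator output): string arguments such as the
+// fft `norm` parameter are converted with std::string(norm), so callers must
+// pass a valid NUL-terminated string; passing NULL is undefined behaviour
+// rather than a way to request the default norm. A hypothetical call:
+//
+//   tensor result[1];
+//   atg_fft_fft(result, self, /*n=*/8, /*dim=*/-1, "backward");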
+int atg_fft_fftshift(tensor *out__, tensor self, int64_t *dim_data, int dim_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_fftshift(*self, torch::IntArrayRef(dim_data, dim_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_hfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_hfft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_hfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_hfft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ifftshift(tensor *out__, tensor self, int64_t *dim_data, int dim_len) {
+ PROTECT(
+ auto outputs__ = torch::fft_ifftshift(*self, torch::IntArrayRef(dim_data, dim_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ihfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_ihfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_ihfft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_irfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_irfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfft(*self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfft_out(*out, *self, n, dim, std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfftfreq(tensor *out__, int64_t n, double d, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfftfreq(n, d, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfftfreq_out(tensor *out__, tensor out, int64_t n, double d) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfftfreq_out(*out, n, d);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fft_rfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) {
+ PROTECT(
+ auto outputs__ = torch::fft_rfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fill_(tensor *out__, tensor self, scalar value) {
+ PROTECT(
+ auto outputs__ = torch::fill_(*self, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) {
+ PROTECT(
+ auto outputs__ = self->fill_diagonal_(*fill_value, (bool)wrap);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fill_tensor_(tensor *out__, tensor self, tensor value) {
+ PROTECT(
+ auto outputs__ = torch::fill_(*self, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fix(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::fix(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fix_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::fix_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fix_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::fix_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) {
+ PROTECT(
+ auto outputs__ = torch::flatten(*self, start_dim, end_dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_flatten_dense_tensors(tensor *out__, tensor *tensors_data, int tensors_len) {
+ PROTECT(
+ auto outputs__ = torch::flatten_dense_tensors(of_carray_tensor(tensors_data, tensors_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
+ PROTECT(
+ auto outputs__ = torch::flip(*self, torch::IntArrayRef(dims_data, dims_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fliplr(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::fliplr(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_flipud(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::flipud(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power(tensor *out__, tensor self, tensor exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power(*self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_(tensor *out__, tensor self, scalar exponent) {
+ PROTECT(
+ auto outputs__ = self->float_power_(*exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_scalar(tensor *out__, scalar self, tensor exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power(*self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_scalar_out(tensor *out__, tensor out, scalar self, tensor exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power_out(*out, *self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_tensor_(tensor *out__, tensor self, tensor exponent) {
+ PROTECT(
+ auto outputs__ = self->float_power_(*exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_tensor_scalar(tensor *out__, tensor self, scalar exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power(*self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power_out(*out, *self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_float_power_tensor_tensor_out(tensor *out__, tensor out, tensor self, tensor exponent) {
+ PROTECT(
+ auto outputs__ = torch::float_power_out(*out, *self, *exponent);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::floor(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::floor_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_divide(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::floor_divide(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_divide_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->floor_divide_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_divide_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::floor_divide_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_divide_scalar(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::floor_divide(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_divide_scalar_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->floor_divide_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_floor_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::floor_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmax(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmax(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmax_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmax_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmin(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmin(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmin_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmin_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::fmod(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->fmod_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::fmod_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmod(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->fmod_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fmod_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::fmod_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_frac(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frac(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_frac_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frac_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_frac_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frac_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
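+// NOTE (editorial sketch, not generator output): operations returning several
+// tensors (a std::tuple on the C++ side) are unpacked with std::get<i> into
+// consecutive out__ slots, as in atg_fractional_max_pool2d above. The caller
+// supplies one slot per result; hypothetically:
+//
+//   tensor outs[2];
+//   if (atg_fractional_max_pool2d(outs, self, ksize, 2, osize, 2, samples) == 0) {
+//     tensor pooled = outs[0], indices = outs[1];
+//   }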
+int atg_fractional_max_pool2d_output(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool2d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fractional_max_pool3d_output(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) {
+ PROTECT(
+ auto outputs__ = torch::fractional_max_pool3d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_frexp(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frexp(*self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_frexp_tensor_out(tensor *out__, tensor mantissa, tensor exponent, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frexp_out(*mantissa, *exponent, *self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_frobenius_norm(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::frobenius_norm(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_frobenius_norm_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::frobenius_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::frobenius_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_from_file(tensor *out__, char * filename, int shared, int64_t size, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::from_file(std::string(filename), (bool)shared, size, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_full_like(tensor *out__, tensor self, scalar fill_value) {
+ PROTECT(
+ auto outputs__ = torch::full_like(*self, *fill_value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, scalar fill_value) {
+ PROTECT(
+ auto outputs__ = torch::full_out(*out, torch::IntArrayRef(size_data, size_len), *fill_value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_fused_moving_avg_obs_fake_quant(tensor *out__, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant) {
+ PROTECT(
+ auto outputs__ = torch::fused_moving_avg_obs_fake_quant(*self, *observer_on, *fake_quant_on, *running_min, *running_max, *scale, *zero_point, averaging_const, quant_min, quant_max, ch_axis, (bool)per_row_fake_quant, (bool)symmetric_quant);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse_grad) {
+ PROTECT(
+ auto outputs__ = torch::gather(*self, dim, *index, (bool)sparse_grad);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gather_backward(tensor *out__, tensor grad, tensor self, int64_t dim, tensor index, int sparse_grad) {
+ PROTECT(
+ auto outputs__ = torch::gather_backward(*grad, *self, dim, *index, (bool)sparse_grad);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad) {
+ PROTECT(
+ auto outputs__ = torch::gather_out(*out, *self, dim, *index, (bool)sparse_grad);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gcd(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::gcd(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gcd_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::gcd_(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gcd_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::gcd_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::ge(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->ge_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::ge_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::ge(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->ge_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ge_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::ge_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gelu(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::gelu(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gelu_backward(tensor *out__, tensor grad, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::gelu_backward(*grad, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gelu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::gelu_backward_out(*grad_input, *grad, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gelu_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::gelu_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_geometric_(tensor *out__, tensor self, double p) {
+ PROTECT(
+ auto outputs__ = self->geometric_(p);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_geqrf(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::geqrf(*self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_geqrf_a(tensor *out__, tensor a, tensor tau, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::geqrf_out(*a, *tau, *self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_ger(tensor *out__, tensor self, tensor vec2) {
+ PROTECT(
+ auto outputs__ = torch::ger(*self, *vec2);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) {
+ PROTECT(
+ auto outputs__ = torch::ger_out(*out, *self, *vec2);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_glu(tensor *out__, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::glu(*self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::glu_backward(*grad_output, *self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_glu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::glu_backward_out(*grad_input, *grad_output, *self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::glu_out(*out, *self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_grad(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->grad();
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::greater(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->greater_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::greater_equal(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->greater_equal_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::greater_equal_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::greater_equal(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->greater_equal_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::greater_equal_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::greater_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::greater(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->greater_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_greater_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::greater_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+ PROTECT(
+ auto outputs__ = torch::grid_sampler(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+ PROTECT(
+ auto outputs__ = torch::grid_sampler_2d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+ PROTECT(
+ auto outputs__ = torch::grid_sampler_2d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+ PROTECT(
+ auto outputs__ = torch::grid_sampler_3d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+ PROTECT(
+ auto outputs__ = torch::grid_sampler_3d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled) {
+ PROTECT(
+ auto outputs__ = torch::group_norm(*input, num_groups, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enabled);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+ PROTECT(
+ auto outputs__ = torch::gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
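+// NOTE (editorial sketch, not generator output): tensor-list arguments such as
+// the RNN parameter pack of atg_gru above go through of_carray_tensor, a
+// helper defined with the hand-written wrapper code. It is assumed to convert
+// a C array of tensor handles into the std::vector<torch::Tensor> libtorch
+// expects, roughly:
+//
+//   std::vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) {
+//     std::vector<torch::Tensor> result;
+//     for (int i = 0; i < len; ++i) result.push_back(*(vs[i]));
+//     return result;
+//   }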
+int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+ PROTECT(
+ auto outputs__ = torch::gru_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gru_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+ PROTECT(
+ auto outputs__ = torch::gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_gt(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::gt(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gt_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->gt_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gt_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::gt_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gt_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::gt(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gt_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->gt_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_gt_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::gt_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hamming_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hamming_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hamming_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hamming_window_periodic_alpha(tensor *out__, int64_t window_length, int periodic, double alpha, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hamming_window_periodic_alpha_beta(tensor *out__, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hann_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hann_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::hann_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardshrink(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardshrink(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar lambd) {
+ PROTECT(
+ auto outputs__ = torch::hardshrink_backward(*grad_out, *self, *lambd);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardshrink_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_out, tensor self, scalar lambd) {
+ PROTECT(
+ auto outputs__ = torch::hardshrink_backward_out(*grad_input, *grad_out, *self, *lambd);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardshrink_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardshrink_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardsigmoid(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardsigmoid(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardsigmoid_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardsigmoid_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardsigmoid_backward(tensor *out__, tensor grad_output, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardsigmoid_backward(*grad_output, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardsigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardsigmoid_backward_out(*grad_input, *grad_output, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardsigmoid_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardsigmoid_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardswish(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardswish(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardswish_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardswish_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardswish_backward(tensor *out__, tensor grad_output, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardswish_backward(*grad_output, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardswish_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardswish_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardtanh(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardtanh(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardtanh_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardtanh_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+ PROTECT(
+ auto outputs__ = torch::hardtanh_backward(*grad_output, *self, *min_val, *max_val);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardtanh_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+ PROTECT(
+ auto outputs__ = torch::hardtanh_backward_out(*grad_input, *grad_output, *self, *min_val, *max_val);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hardtanh_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::hardtanh_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_heaviside(tensor *out__, tensor self, tensor values) {
+ PROTECT(
+ auto outputs__ = torch::heaviside(*self, *values);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_heaviside_(tensor *out__, tensor self, tensor values) {
+ PROTECT(
+ auto outputs__ = self->heaviside_(*values);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_heaviside_out(tensor *out__, tensor out, tensor self, tensor values) {
+ PROTECT(
+ auto outputs__ = torch::heaviside_out(*out, *self, *values);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) {
+ PROTECT(
+ auto outputs__ = torch::hinge_embedding_loss(*self, *target, margin, reduction);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_histc(tensor *out__, tensor self, int64_t bins) {
+ PROTECT(
+ auto outputs__ = torch::histc(*self, bins);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) {
+ PROTECT(
+ auto outputs__ = torch::histc_out(*out, *self, bins);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hsplit(tensor *out__, tensor self, int64_t sections) {
+ PROTECT(
+ auto outputs__ = torch::hsplit(*self, sections);
+ int sz = outputs__.size();
+ // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+ for (int i = 0; i < sz; ++i)
+ out__[i] = new torch::Tensor(outputs__[i]);
+ out__[sz] = nullptr;
+ // return out__;
+ return 0;
+)
+return 1;
+}
+
+int atg_hsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) {
+ PROTECT(
+ auto outputs__ = torch::hsplit(*self, torch::IntArrayRef(indices_data, indices_len));
+ int sz = outputs__.size();
+ // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+ for (int i = 0; i < sz; ++i)
+ out__[i] = new torch::Tensor(outputs__[i]);
+ out__[sz] = nullptr;
+ // return out__;
+ return 0;
+)
+return 1;
+}
+
+int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) {
+ PROTECT(
+ auto outputs__ = torch::hspmm(*mat1, *mat2);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) {
+ PROTECT(
+ auto outputs__ = torch::hspmm_out(*out, *mat1, *mat2);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hstack(tensor *out__, tensor *tensors_data, int tensors_len) {
+ PROTECT(
+ auto outputs__ = torch::hstack(of_carray_tensor(tensors_data, tensors_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
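+// NOTE (annotation, not generator output): ops with a variable number of
+// results, such as atg_hsplit above, write one pointer per returned tensor
+// into out__ and terminate the array with nullptr, so the caller must size
+// out__ generously; the commented-out malloc/return lines are residue of an
+// earlier scheme in which the wrapper allocated and returned the array
+// itself. Tensor-list arguments travel the opposite way: a (tensor*, int)
+// pair is rebuilt into a vector of tensors by of_carray_tensor, as in
+// atg_hstack.
+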
+int atg_hstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) {
+ PROTECT(
+ auto outputs__ = torch::hstack_out(*out, of_carray_tensor(tensors_data, tensors_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_huber_loss(tensor *out__, tensor self, tensor target, int64_t reduction, double delta) {
+ PROTECT(
+ auto outputs__ = torch::huber_loss(*self, *target, reduction, delta);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_huber_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta) {
+ PROTECT(
+ auto outputs__ = torch::huber_loss_backward(*grad_output, *self, *target, reduction, delta);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_huber_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta) {
+ PROTECT(
+ auto outputs__ = torch::huber_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, delta);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_huber_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction, double delta) {
+ PROTECT(
+ auto outputs__ = torch::huber_loss_out(*out, *self, *target, reduction, delta);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hypot(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::hypot(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hypot_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->hypot_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_hypot_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::hypot_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_i0(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::i0(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_i0_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::i0_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_i0_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::i0_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_igamma(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::igamma(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_igamma_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->igamma_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_igamma_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::igamma_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_igammac(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::igammac(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_igammac_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->igammac_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
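+// NOTE (annotation, not generator output): in-place variants are emitted as
+// a method call on the first tensor when ATen apparently exposes them only
+// as Tensor methods -- compare atg_hypot_ (self->hypot_) with atg_i0_
+// (torch::i0_); the generator seems to pick whichever entry point the
+// declarations file lists (an inference from the pattern, not documented
+// here).
+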
+int atg_igammac_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::igammac_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+ PROTECT(
+ auto outputs__ = torch::im2col(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+ PROTECT(
+ auto outputs__ = torch::im2col_backward(*grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_im2col_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+ PROTECT(
+ auto outputs__ = torch::im2col_backward_out(*grad_input, *grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+ PROTECT(
+ auto outputs__ = torch::im2col_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_imag(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::imag(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len) {
+ PROTECT(
+ auto outputs__ = torch::index(*self, of_carray_tensor_opt(indices_data, indices_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+ PROTECT(
+ auto outputs__ = torch::index_add(*self, dim, *index, *source);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+ PROTECT(
+ auto outputs__ = self->index_add_(dim, *index, *source);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
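+// NOTE (annotation, not generator output): integer-list parameters arrive as
+// an (int64_t*, int) pair and are wrapped in place with
+// torch::IntArrayRef(data, len), as in atg_im2col above. Lists of
+// possibly-null index tensors (atg_index, and atg_index_put below) instead
+// go through of_carray_tensor_opt, which presumably maps null handles to
+// "absent" indices; its definition lives elsewhere in the patch.
+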
+int atg_index_add_alpha(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) {
+ PROTECT(
+ auto outputs__ = torch::index_add(*self, dim, *index, *source, *alpha);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_add_alpha_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) {
+ PROTECT(
+ auto outputs__ = self->index_add_(dim, *index, *source, *alpha);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+ PROTECT(
+ auto outputs__ = torch::index_copy(*self, dim, *index, *source);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+ PROTECT(
+ auto outputs__ = self->index_copy_(dim, *index, *source);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+ PROTECT(
+ auto outputs__ = torch::index_fill(*self, dim, *index, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+ PROTECT(
+ auto outputs__ = self->index_fill_(dim, *index, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_fill_int_tensor(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+ PROTECT(
+ auto outputs__ = torch::index_fill(*self, dim, *index, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_fill_int_tensor_(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+ PROTECT(
+ auto outputs__ = self->index_fill_(dim, *index, *value);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+ PROTECT(
+ auto outputs__ = torch::index_put(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+ PROTECT(
+ auto outputs__ = torch::index_put_(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) {
+ PROTECT(
+ auto outputs__ = torch::index_select(*self, dim, *index);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_select_backward(tensor *out__, tensor grad, int64_t *self_sizes_data, int self_sizes_len, int64_t dim, tensor index) {
+ PROTECT(
+ auto outputs__ = torch::index_select_backward(*grad, torch::IntArrayRef(self_sizes_data, self_sizes_len), dim, *index);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) {
+ PROTECT(
+ auto outputs__ = torch::index_select_out(*out, *self, dim, *index);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_indices(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->indices();
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_infinitely_differentiable_gelu_backward(tensor *out__, tensor grad, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::infinitely_differentiable_gelu_backward(*grad, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_inner(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::inner(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_inner_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::inner_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) {
+ PROTECT(
+ auto outputs__ = torch::instance_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)use_input_stats, momentum, eps, (bool)cudnn_enabled);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_int_repr(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::int_repr(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_inverse(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::inverse(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_inverse_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::inverse_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double atol, int equal_nan) {
+ PROTECT(
+ auto outputs__ = torch::isclose(*self, *other, rtol, atol, (bool)equal_nan);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isfinite(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isfinite(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isin(tensor *out__, tensor elements, tensor test_elements, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin(*elements, *test_elements, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isin_scalar_tensor(tensor *out__, scalar element, tensor test_elements, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin(*element, *test_elements, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isin_scalar_tensor_out(tensor *out__, tensor out, scalar element, tensor test_elements, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin_out(*out, *element, *test_elements, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isin_tensor_scalar(tensor *out__, tensor elements, scalar test_element, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin(*elements, *test_element, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
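+// NOTE (annotation, not generator output): optional tensor arguments are
+// plain, possibly-null handles; the wrapper substitutes a default-constructed
+// (undefined) torch::Tensor for null, e.g. (weight ? *weight :
+// torch::Tensor()) in atg_instance_norm above, which ATen treats as "no
+// tensor supplied".
+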
+int atg_isin_tensor_scalar_out(tensor *out__, tensor out, tensor elements, scalar test_element, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin_out(*out, *elements, *test_element, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isin_tensor_tensor_out(tensor *out__, tensor out, tensor elements, tensor test_elements, int assume_unique, int invert) {
+ PROTECT(
+ auto outputs__ = torch::isin_out(*out, *elements, *test_elements, (bool)assume_unique, (bool)invert);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isinf(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isinf(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isnan(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isnan(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isneginf(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isneginf(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isneginf_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isneginf_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isposinf(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isposinf(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isposinf_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isposinf_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_isreal(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::isreal(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_istft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int center, int normalized, int onesided, int64_t length, int return_complex) {
+ PROTECT(
+ auto outputs__ = torch::istft(*self, n_fft, hop_length, win_length, (window ? *window : torch::Tensor()), (bool)center, (bool)normalized, (bool)onesided, length, (bool)return_complex);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kaiser_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::kaiser_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kaiser_window_beta(tensor *out__, int64_t window_length, int periodic, double beta, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kaiser_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction, int log_target) {
+ PROTECT(
+ auto outputs__ = torch::kl_div(*self, *target, reduction, (bool)log_target);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, int log_target) {
+ PROTECT(
+ auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction, (bool)log_target);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kron(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::kron(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kron_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::kron_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::kthvalue(*self, k, dim, (bool)keepdim);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_kthvalue_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::kthvalue_out(*values, *indices, *self, k, dim, (bool)keepdim);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+ PROTECT(
+ auto outputs__ = torch::l1_loss(*self, *target, reduction);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+ PROTECT(
+ auto outputs__ = torch::l1_loss_backward(*grad_output, *self, *target, reduction);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_l1_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+ PROTECT(
+ auto outputs__ = torch::l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
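+// NOTE (annotation, not generator output): tuple results are unpacked with
+// std::get, one out__ slot per element, as in atg_kthvalue above (values,
+// indices), so out__ must match the arity of the op. Optional integers such
+// as istft's hop_length pass through as plain int64_t, converting implicitly
+// to c10::optional<int64_t>, so "not given" cannot be expressed through this
+// wrapper (an observation about the generated code, not a documented
+// guarantee).
+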
+int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+ PROTECT(
+ auto outputs__ = torch::l1_loss_out(*out, *self, *target, reduction);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable) {
+ PROTECT(
+ auto outputs__ = torch::layer_norm(*input, torch::IntArrayRef(normalized_shape_data, normalized_shape_len), (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enable);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lcm(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::lcm(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lcm_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::lcm_(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lcm_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::lcm_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ldexp(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::ldexp(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ldexp_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::ldexp_(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_ldexp_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::ldexp_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::le(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->le_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::le_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::le(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->le_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_le_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::le_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_leaky_relu(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::leaky_relu(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_leaky_relu_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::leaky_relu_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
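+// NOTE (annotation, not generator output): loss wrappers forward reduction
+// as a raw int64_t; the values follow ATen's at::Reduction enum (0 = None,
+// 1 = Mean, 2 = Sum -- stated from general ATen knowledge, not from this
+// hunk).
+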
+int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scalar negative_slope, int self_is_result) {
+ PROTECT(
+ auto outputs__ = torch::leaky_relu_backward(*grad_output, *self, *negative_slope, (bool)self_is_result);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_leaky_relu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope, int self_is_result) {
+ PROTECT(
+ auto outputs__ = torch::leaky_relu_backward_out(*grad_input, *grad_output, *self, *negative_slope, (bool)self_is_result);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::leaky_relu_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) {
+ PROTECT(
+ auto outputs__ = torch::lerp(*self, *end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) {
+ PROTECT(
+ auto outputs__ = self->lerp_(*end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp_scalar_out(tensor *out__, tensor out, tensor self, tensor end, scalar weight) {
+ PROTECT(
+ auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp_tensor(tensor *out__, tensor self, tensor end, tensor weight) {
+ PROTECT(
+ auto outputs__ = torch::lerp(*self, *end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp_tensor_(tensor *out__, tensor self, tensor end, tensor weight) {
+ PROTECT(
+ auto outputs__ = self->lerp_(*end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lerp_tensor_out(tensor *out__, tensor out, tensor self, tensor end, tensor weight) {
+ PROTECT(
+ auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::less(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->less_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::less_equal(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal_(tensor *out__, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = self->less_equal_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::less_equal_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::less_equal(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->less_equal_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::less_equal_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+ PROTECT(
+ auto outputs__ = torch::less_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_tensor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::less(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_tensor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->less_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_less_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::less_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lgamma(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::lgamma(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lgamma_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->lgamma_();
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_lgamma_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::lgamma_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cholesky(tensor *out__, tensor self, int upper) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cholesky(*self, (bool)upper);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cholesky_ex(tensor *out__, tensor self, int upper, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cholesky_ex(*self, (bool)upper, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cholesky_ex_l(tensor *out__, tensor L, tensor info, tensor self, int upper, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cholesky_ex_out(*L, *info, *self, (bool)upper, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cholesky_out(*out, *self, (bool)upper);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cond(tensor *out__, tensor self, scalar p) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cond(*self, *p);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cond_out(tensor *out__, tensor out, tensor self, scalar p) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cond_out(*out, *self, *p);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_cond_p_str(tensor *out__, tensor self, char * p) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cond(*self, std::string(p));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
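+// NOTE (annotation, not generator output): string parameters arrive as char*
+// and are copied into std::string before the ATen call, as in
+// atg_linalg_cond_p_str above, where p names a norm order such as "fro" or
+// "nuc" (examples taken from PyTorch's linalg.cond documentation, not from
+// this hunk).
+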
+int atg_linalg_cond_p_str_out(tensor *out__, tensor out, tensor self, char * p) {
+ PROTECT(
+ auto outputs__ = torch::linalg_cond_out(*out, *self, std::string(p));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_det(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_det(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_det_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_det_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eig(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eig(*self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eig_out(tensor *out__, tensor eigenvalues, tensor eigenvectors, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eig_out(*eigenvalues, *eigenvectors, *self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigh(tensor *out__, tensor self, char * UPLO) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigh(*self, std::string(UPLO));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigh_eigvals(tensor *out__, tensor eigvals, tensor eigvecs, tensor self, char * UPLO) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigh_out(*eigvals, *eigvecs, *self, std::string(UPLO));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigvals(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigvals(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigvals_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigvals_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigvalsh(tensor *out__, tensor self, char * UPLO) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigvalsh(*self, std::string(UPLO));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_eigvalsh_out(tensor *out__, tensor out, tensor self, char * UPLO) {
+ PROTECT(
+ auto outputs__ = torch::linalg_eigvalsh_out(*out, *self, std::string(UPLO));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_householder_product(tensor *out__, tensor input, tensor tau) {
+ PROTECT(
+ auto outputs__ = torch::linalg_householder_product(*input, *tau);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_householder_product_out(tensor *out__, tensor out, tensor input, tensor tau) {
+ PROTECT(
+ auto outputs__ = torch::linalg_householder_product_out(*out, *input, *tau);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_inv(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_inv(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_inv_ex(tensor *out__, tensor self, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_inv_ex(*self, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
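+// NOTE (annotation, not generator output): the _ex variants pair the result
+// with an info tensor of LAPACK-style status codes and take a check_errors
+// flag, letting callers detect singular inputs without an exception; both
+// values land in out__ like any other tuple return.
+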
+int atg_linalg_inv_ex_inverse(tensor *out__, tensor inverse, tensor info, tensor self, int check_errors) {
+ PROTECT(
+ auto outputs__ = torch::linalg_inv_ex_out(*inverse, *info, *self, (bool)check_errors);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_inv_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_inv_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_lstsq(tensor *out__, tensor self, tensor b, double rcond, char * driver) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lstsq(*self, *b, rcond, std::string(driver));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ out__[3] = new torch::Tensor(std::get<3>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_lstsq_out(tensor *out__, tensor solution, tensor residuals, tensor rank, tensor singular_values, tensor self, tensor b, double rcond, char * driver) {
+ PROTECT(
+ auto outputs__ = torch::linalg_lstsq_out(*solution, *residuals, *rank, *singular_values, *self, *b, rcond, std::string(driver));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ out__[3] = new torch::Tensor(std::get<3>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matmul(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matmul(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matmul_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_power(tensor *out__, tensor self, int64_t n) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_power(*self, n);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_power_out(tensor *out__, tensor out, tensor self, int64_t n) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_power_out(*out, *self, n);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_rank(tensor *out__, tensor self, double tol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank(*self, tol, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_rank_out(tensor *out__, tensor out, tensor self, double tol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank_out(*out, *self, tol, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_rank_out_tol_tensor(tensor *out__, tensor out, tensor input, tensor tol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank_out(*out, *input, *tol, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_matrix_rank_tol_tensor(tensor *out__, tensor input, tensor tol, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_matrix_rank(*input, *tol, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_multi_dot(tensor *out__, tensor *tensors_data, int tensors_len) {
+ PROTECT(
+ auto outputs__ = torch::linalg_multi_dot(of_carray_tensor(tensors_data, tensors_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_multi_dot_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) {
+ PROTECT(
+ auto outputs__ = torch::linalg_multi_dot_out(*out, of_carray_tensor(tensors_data, tensors_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_pinv(tensor *out__, tensor self, double rcond, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv(*self, rcond, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_pinv_out(tensor *out__, tensor out, tensor self, double rcond, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv_out(*out, *self, rcond, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_pinv_out_rcond_tensor(tensor *out__, tensor out, tensor self, tensor rcond, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv_out(*out, *self, *rcond, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_pinv_rcond_tensor(tensor *out__, tensor self, tensor rcond, int hermitian) {
+ PROTECT(
+ auto outputs__ = torch::linalg_pinv(*self, *rcond, (bool)hermitian);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_qr(tensor *out__, tensor self, char * mode) {
+ PROTECT(
+ auto outputs__ = torch::linalg_qr(*self, std::string(mode));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, char * mode) {
+ PROTECT(
+ auto outputs__ = torch::linalg_qr_out(*Q, *R, *self, std::string(mode));
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_slogdet(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_slogdet(*self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_slogdet_out(tensor *out__, tensor sign, tensor logabsdet, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::linalg_slogdet_out(*sign, *logabsdet, *self);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_solve(tensor *out__, tensor input, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::linalg_solve(*input, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_solve_out(tensor *out__, tensor out, tensor input, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::linalg_solve_out(*out, *input, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_svd(tensor *out__, tensor self, int full_matrices) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svd(*self, (bool)full_matrices);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ return 0;
+)
+return 1;
+}
+
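+// NOTE (annotation, not generator output): when one ATen operator has
+// several overloads, the extra wrappers carry the overload name as a suffix
+// -- atg_linalg_matrix_rank vs atg_linalg_matrix_rank_tol_tensor above, plus
+// the _out forms that write into caller-supplied tensors -- so every
+// overload keeps a unique, C-compatible symbol.
+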
+int atg_linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor self, int full_matrices) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svd_out(*U, *S, *Vh, *self, (bool)full_matrices);
+ out__[0] = new torch::Tensor(std::get<0>(outputs__));
+ out__[1] = new torch::Tensor(std::get<1>(outputs__));
+ out__[2] = new torch::Tensor(std::get<2>(outputs__));
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_svdvals(tensor *out__, tensor input) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svdvals(*input);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_svdvals_out(tensor *out__, tensor out, tensor input) {
+ PROTECT(
+ auto outputs__ = torch::linalg_svdvals_out(*out, *input);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_tensorinv(tensor *out__, tensor self, int64_t ind) {
+ PROTECT(
+ auto outputs__ = torch::linalg_tensorinv(*self, ind);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_tensorinv_out(tensor *out__, tensor out, tensor self, int64_t ind) {
+ PROTECT(
+ auto outputs__ = torch::linalg_tensorinv_out(*out, *self, ind);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_tensorsolve(tensor *out__, tensor self, tensor other, int64_t *dims_data, int dims_len) {
+ PROTECT(
+ auto outputs__ = torch::linalg_tensorsolve(*self, *other, torch::IntArrayRef(dims_data, dims_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linalg_tensorsolve_out(tensor *out__, tensor out, tensor self, tensor other, int64_t *dims_data, int dims_len) {
+ PROTECT(
+ auto outputs__ = torch::linalg_tensorsolve_out(*out, *self, *other, torch::IntArrayRef(dims_data, dims_len));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::linear(*input, *weight, (bias ? *bias : torch::Tensor()));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linear_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias) {
+ PROTECT(
+ auto outputs__ = torch::linear_out(*out, *input, *weight, (bias ? *bias : torch::Tensor()));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) {
+ PROTECT(
+ auto outputs__ = torch::linspace_out(*out, *start, *end, steps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log10(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log10(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log10_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log10_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log10_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log10_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log1p(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log1p(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log1p_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log1p_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log1p_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log1p_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log2(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log2(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log2_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log2_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log2_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log2_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log_(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
+ PROTECT(
+ auto outputs__ = self->log_normal_(mean, std);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_sigmoid(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log_sigmoid(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, tensor buffer) {
+ PROTECT(
+ auto outputs__ = torch::log_sigmoid_backward(*grad_output, *self, *buffer);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_sigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor buffer) {
+ PROTECT(
+ auto outputs__ = torch::log_sigmoid_backward_out(*grad_input, *grad_output, *self, *buffer);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::log_sigmoid_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
+ PROTECT(
+ auto outputs__ = torch::log_softmax(*self, dim, torch::ScalarType(dtype));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logaddexp(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logaddexp(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logaddexp2(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logaddexp2(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logaddexp2_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logaddexp2_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logaddexp_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logaddexp_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logcumsumexp(tensor *out__, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::logcumsumexp(*self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) {
+ PROTECT(
+ auto outputs__ = torch::logcumsumexp_out(*out, *self, dim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logdet(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::logdet(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_and(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_and(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_and_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->logical_and_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_and_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_and_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_not(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::logical_not(*self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_not_(tensor *out__, tensor self) {
+ PROTECT(
+ auto outputs__ = self->logical_not_();
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_not_out(tensor *out__, tensor out, tensor self) {
+ PROTECT(
+ auto outputs__ = torch::logical_not_out(*out, *self);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_or(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_or(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_or_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->logical_or_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_or_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_or_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_xor(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_xor(*self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_xor_(tensor *out__, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = self->logical_xor_(*other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) {
+ PROTECT(
+ auto outputs__ = torch::logical_xor_out(*out, *self, *other);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logit(tensor *out__, tensor self, double eps) {
+ PROTECT(
+ auto outputs__ = torch::logit(*self, eps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logit_(tensor *out__, tensor self, double eps) {
+ PROTECT(
+ auto outputs__ = torch::logit_(*self, eps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logit_backward(tensor *out__, tensor grad_output, tensor self, double eps) {
+ PROTECT(
+ auto outputs__ = torch::logit_backward(*grad_output, *self, eps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logit_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, double eps) {
+ PROTECT(
+ auto outputs__ = torch::logit_backward_out(*grad_input, *grad_output, *self, eps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logit_out(tensor *out__, tensor out, tensor self, double eps) {
+ PROTECT(
+ auto outputs__ = torch::logit_out(*out, *self, eps);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) {
+ PROTECT(
+ auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) {
+ PROTECT(
+ auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
+int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+ PROTECT(
+ auto outputs__ = torch::logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+ out__[0] = new torch::Tensor(outputs__);
+ return 0;
+)
+return 1;
+}
+
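+// NOTE (annotation, not generator output): the RNN wrappers below flatten
+// every aggregate into (pointer, length) pairs -- initial states and weights
+// arrive via of_carray_tensor -- and return their output/hidden tuples
+// through consecutive out__ slots, three of them for atg_lstm, using the
+// same std::get unpacking seen throughout this file.
+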
int batch_first) { + PROTECT( + auto outputs__ = torch::lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) { + PROTECT( + auto outputs__ = torch::lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor())); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_lstm_data(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) { + PROTECT( + auto outputs__ = torch::lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_lstsq(tensor *out__, tensor self, tensor A) { + PROTECT( + auto outputs__ = torch::lstsq(*self, *A); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_lstsq_x(tensor *out__, tensor X, tensor qr, tensor self, tensor A) { + PROTECT( + auto outputs__ = torch::lstsq_out(*X, *qr, *self, *A); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_lt(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::lt(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lt_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->lt_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lt_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::lt_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lt_tensor(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::lt(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lt_tensor_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->lt_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lt_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::lt_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) { + PROTECT( + auto outputs__ = torch::lu_solve(*self, *LU_data, *LU_pivots); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, 
tensor LU_pivots) { + PROTECT( + auto outputs__ = torch::lu_solve_out(*out, *self, *LU_data, *LU_pivots); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_lu_unpack(tensor *out__, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots) { + PROTECT( + auto outputs__ = torch::lu_unpack(*LU_data, *LU_pivots, (bool)unpack_data, (bool)unpack_pivots); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_lu_unpack_out(tensor *out__, tensor P, tensor L, tensor U, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots) { + PROTECT( + auto outputs__ = torch::lu_unpack_out(*P, *L, *U, *LU_data, *LU_pivots, (bool)unpack_data, (bool)unpack_pivots); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { + PROTECT( + auto outputs__ = torch::margin_ranking_loss(*input1, *input2, *target, margin, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) { + PROTECT( + auto outputs__ = torch::masked_fill(*self, *mask, *value); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) { + PROTECT( + auto outputs__ = self->masked_fill_(*mask, *value); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_fill_tensor(tensor *out__, tensor self, tensor mask, tensor value) { + PROTECT( + auto outputs__ = torch::masked_fill(*self, *mask, *value); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_fill_tensor_(tensor *out__, tensor self, tensor mask, tensor value) { + PROTECT( + auto outputs__ = self->masked_fill_(*mask, *value); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) { + PROTECT( + auto outputs__ = torch::masked_scatter(*self, *mask, *source); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) { + PROTECT( + auto outputs__ = self->masked_scatter_(*mask, *source); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_select(tensor *out__, tensor self, tensor mask) { + PROTECT( + auto outputs__ = torch::masked_select(*self, *mask); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_select_backward(tensor *out__, tensor grad, tensor input, tensor mask) { + PROTECT( + auto outputs__ = torch::masked_select_backward(*grad, *input, *mask); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { + PROTECT( + auto outputs__ = torch::masked_select_out(*out, *self, *mask); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_matmul(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::matmul(*self, *other); + out__[0] = new 
torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::matmul_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matrix_exp(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::matrix_exp(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matrix_exp_backward(tensor *out__, tensor self, tensor grad) {
+  PROTECT(
+    auto outputs__ = torch::matrix_exp_backward(*self, *grad);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matrix_power(tensor *out__, tensor self, int64_t n) {
+  PROTECT(
+    auto outputs__ = torch::matrix_power(*self, n);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matrix_power_out(tensor *out__, tensor out, tensor self, int64_t n) {
+  PROTECT(
+    auto outputs__ = torch::matrix_power_out(*out, *self, n);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
 )
@@ -4508,43 +9977,2358 @@ return 1;
 int atg_matrix_rank(tensor *out__, tensor self, int symmetric) {
   PROTECT(
-    auto outputs__ = torch::matrix_rank(*self, (bool)symmetric);
+    auto outputs__ = torch::matrix_rank(*self, (bool)symmetric);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_matrix_rank_tol(tensor *out__, tensor self, double tol, int symmetric) {
+  PROTECT(
+    auto outputs__ = torch::matrix_rank(*self, tol, (bool)symmetric);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_max(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::max(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_max_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::max(*self, dim, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    return 0;
+)
+return 1;
+}
+
+int atg_max_dim_max(tensor *out__, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::max_out(*max, *max_values, *self, dim, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    return 0;
+)
+return 1;
+}
+
+int atg_max_other(tensor *out__, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::max(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::max_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
+  PROTECT(
+    auto outputs__ = torch::max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t 
*padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool1d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool2d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { + PROTECT( + auto outputs__ = torch::max_pool2d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool2d_with_indices_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { + PROTECT( + auto outputs__ = torch::max_pool2d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool2d_with_indices_out(*out, *indices, *self, 
torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool3d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { + PROTECT( + auto outputs__ = torch::max_pool3d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool3d_with_indices_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { + PROTECT( + auto outputs__ = torch::max_pool3d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::max_pool3d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), 
torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { + PROTECT( + auto outputs__ = torch::max_unpool2d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { + PROTECT( + auto outputs__ = torch::max_unpool2d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { + PROTECT( + auto outputs__ = torch::max_unpool2d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { + PROTECT( + auto outputs__ = torch::max_unpool2d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { + PROTECT( + auto outputs__ = torch::max_unpool3d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { + PROTECT( + auto outputs__ = torch::max_unpool3d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { + PROTECT( + auto outputs__ = torch::max_unpool3d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { + PROTECT( + auto outputs__ = torch::max_unpool3d_out(*out, *self, *indices, 
torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_maximum(tensor *out__, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::maximum(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_maximum_out(tensor *out__, tensor out, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::maximum_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_mean(tensor *out__, tensor self, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::mean(*self, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::mean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_median(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::median(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_median_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::median(*self, dim, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    return 0;
+)
+return 1;
+}
+
+int atg_median_dim_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::median_out(*values, *indices, *self, dim, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    return 0;
+)
+return 1;
+}
+
+int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) {
+  PROTECT(
+    auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
+    return 0;
+)
+return 1;
+}
+
+int atg_meshgrid_indexing(tensor *out__, tensor *tensors_data, int tensors_len, char * indexing) {
+  PROTECT(
+    auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len), std::string(indexing));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
+    return 0;
+)
+return 1;
+}
+
+int atg_min(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::min(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_min_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::min(*self, dim, (bool)keepdim);
+    out__[0] = new 
torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_min_dim_min(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::min_out(*min, *min_indices, *self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_min_other(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::min(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::min_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_minimum(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::minimum(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_minimum_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::minimum_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { + PROTECT( + auto outputs__ = torch::miopen_batch_norm(*input, *weight, (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon) { + PROTECT( + auto outputs__ = torch::miopen_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? *save_var : torch::Tensor()), epsilon); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) { + PROTECT( + auto outputs__ = torch::miopen_convolution_backward_bias(*grad_output); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution_transpose(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_depthwise_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_depthwise_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { + PROTECT( + auto outputs__ = torch::miopen_depthwise_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) { + PROTECT( + auto outputs__ = torch::miopen_rnn(*input, of_carray_tensor(weight_data, weight_len), weight_stride0, *hx, (cx ? *cx : torch::Tensor()), mode, hidden_size, num_layers, (bool)batch_first, dropout, (bool)train, (bool)bidirectional, torch::IntArrayRef(batch_sizes_data, batch_sizes_len), (dropout_state ? 
*dropout_state : torch::Tensor())); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + out__[4] = new torch::Tensor(std::get<4>(outputs__)); + return 0; +) +return 1; +} + +int atg_mish(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::mish(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mish_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::mish_(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mish_backward(tensor *out__, tensor grad_output, tensor self) { + PROTECT( + auto outputs__ = torch::mish_backward(*grad_output, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mish_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::mish_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { + PROTECT( + auto outputs__ = torch::mkldnn_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_adaptive_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self) { + PROTECT( + auto outputs__ = torch::mkldnn_adaptive_avg_pool2d_backward(*grad_output, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { + PROTECT( + auto outputs__ = torch::mkldnn_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { + PROTECT( + auto outputs__ = torch::mkldnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { + PROTECT( + auto outputs__ = torch::mkldnn_convolution_backward_weights(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_mkldnn_linear(tensor *out__, tensor self, tensor weight, tensor bias) { + PROTECT( + auto outputs__ = torch::mkldnn_linear(*self, *weight, (bias ? 
*bias : torch::Tensor())); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_linear_backward_input(tensor *out__, int64_t *input_size_data, int input_size_len, tensor grad_output, tensor weight) { + PROTECT( + auto outputs__ = torch::mkldnn_linear_backward_input(torch::IntArrayRef(input_size_data, input_size_len), *grad_output, *weight); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_linear_backward_weights(tensor *out__, tensor grad_output, tensor input, tensor weight, int bias_defined) { + PROTECT( + auto outputs__ = torch::mkldnn_linear_backward_weights(*grad_output, *input, *weight, (bool)bias_defined); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::mkldnn_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_max_pool2d_backward(tensor *out__, tensor grad_output, tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::mkldnn_max_pool2d_backward(*grad_output, *output, *input, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::mkldnn_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_max_pool3d_backward(tensor *out__, tensor grad_output, tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::mkldnn_max_pool3d_backward(*grad_output, *output, *input, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t 
groups) { + PROTECT( + auto outputs__ = torch::mkldnn_reorder_conv2d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mkldnn_reorder_conv3d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { + PROTECT( + auto outputs__ = torch::mkldnn_reorder_conv3d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mm(tensor *out__, tensor self, tensor mat2) { + PROTECT( + auto outputs__ = torch::mm(*self, *mat2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) { + PROTECT( + auto outputs__ = torch::mm_out(*out, *self, *mat2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::mode(*self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_mode_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::mode_out(*values, *indices, *self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_moveaxis(tensor *out__, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len) { + PROTECT( + auto outputs__ = torch::moveaxis(*self, torch::IntArrayRef(source_data, source_len), torch::IntArrayRef(destination_data, destination_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_moveaxis_int(tensor *out__, tensor self, int64_t source, int64_t destination) { + PROTECT( + auto outputs__ = torch::moveaxis(*self, source, destination); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_movedim(tensor *out__, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len) { + PROTECT( + auto outputs__ = torch::movedim(*self, torch::IntArrayRef(source_data, source_len), torch::IntArrayRef(destination_data, destination_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_movedim_int(tensor *out__, tensor self, int64_t source, int64_t destination) { + PROTECT( + auto outputs__ = torch::movedim(*self, source, destination); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::mse_loss(*self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::mse_loss_backward(*grad_output, *self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int 
atg_mse_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::mse_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::mse_loss_out(*out, *self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_msort(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::msort(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_msort_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::msort_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mul(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::mul(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mul_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->mul_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::mul_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mul_scalar(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::mul(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mul_scalar_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->mul_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) { + PROTECT( + auto outputs__ = torch::multi_margin_loss_backward(*grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multi_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) { + PROTECT( + auto outputs__ = torch::multi_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, *p, *margin, (weight ? 
*weight : torch::Tensor()), reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::multilabel_margin_loss(*self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) { + PROTECT( + auto outputs__ = torch::multilabel_margin_loss_backward(*grad_output, *self, *target, reduction, *is_target); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multilabel_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) { + PROTECT( + auto outputs__ = torch::multilabel_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, *is_target); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { + PROTECT( + auto outputs__ = torch::multilabel_margin_loss_out(*out, *self, *target, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replacement) { + PROTECT( + auto outputs__ = torch::multinomial(*self, num_samples, (bool)replacement); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samples, int replacement) { + PROTECT( + auto outputs__ = torch::multinomial_out(*out, *self, num_samples, (bool)replacement); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multiply(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::multiply(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multiply_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->multiply_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multiply_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::multiply_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multiply_scalar(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::multiply(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_multiply_scalar_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->multiply_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mv(tensor *out__, tensor self, tensor vec) { + PROTECT( + auto outputs__ = torch::mv(*self, *vec); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) { + PROTECT( + auto outputs__ = torch::mv_out(*out, *self, *vec); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mvlgamma(tensor *out__, tensor self, int64_t p) { + PROTECT( + auto outputs__ = torch::mvlgamma(*self, p); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mvlgamma_(tensor *out__, tensor 
self, int64_t p) { + PROTECT( + auto outputs__ = self->mvlgamma_(p); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_mvlgamma_out(tensor *out__, tensor out, tensor self, int64_t p) { + PROTECT( + auto outputs__ = torch::mvlgamma_out(*out, *self, p); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nan_to_num(tensor *out__, tensor self, double nan, double posinf, double neginf) { + PROTECT( + auto outputs__ = torch::nan_to_num(*self, nan, posinf, neginf); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nan_to_num_(tensor *out__, tensor self, double nan, double posinf, double neginf) { + PROTECT( + auto outputs__ = torch::nan_to_num_(*self, nan, posinf, neginf); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nan_to_num_out(tensor *out__, tensor out, tensor self, double nan, double posinf, double neginf) { + PROTECT( + auto outputs__ = torch::nan_to_num_out(*out, *self, nan, posinf, neginf); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanmean(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nanmean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanmean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nanmean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanmedian(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::nanmedian(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanmedian_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanmedian(*self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_nanmedian_dim_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanmedian_out(*values, *indices, *self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_nanquantile(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, *q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_new(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, *q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim, 
char * interpolation) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nanquantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nansum(tensor *out__, tensor self, int dtype) { + PROTECT( + auto outputs__ = torch::nansum(*self, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nansum_dim_intlist(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nansum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nansum_intlist_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nansum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) { + PROTECT( + auto outputs__ = torch::narrow(*self, dim, start, length); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) { + PROTECT( + auto outputs__ = torch::narrow_copy(*self, dim, start, length); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_narrow_copy_out(tensor *out__, tensor out, tensor self, int64_t dim, int64_t start, int64_t length) { + PROTECT( + auto outputs__ = torch::narrow_copy_out(*out, *self, dim, start, length); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_narrow_tensor(tensor *out__, tensor self, int64_t dim, tensor start, int64_t length) { + PROTECT( + auto outputs__ = torch::narrow(*self, dim, *start, length); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) { + PROTECT( + auto outputs__ = torch::native_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? 
*running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_native_batch_norm_out(tensor *out__, tensor out, tensor save_mean, tensor save_invstd, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) { + PROTECT( + auto outputs__ = torch::native_batch_norm_out(*out, *save_mean, *save_invstd, *input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_native_group_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps) { + PROTECT( + auto outputs__ = torch::native_group_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), n, C, HxW, group, eps); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + return 0; +) +return 1; +} + +int atg_native_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps) { + PROTECT( + auto outputs__ = torch::native_layer_norm(*input, torch::IntArrayRef(normalized_shape_data, normalized_shape_len), (weight ? *weight : torch::Tensor()), (bias ? 
*bias : torch::Tensor()), eps);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    return 0;
+)
+return 1;
+}
+
+int atg_native_norm(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::native_norm(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_native_norm_scalaropt_dim_dtype(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::native_norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne(tensor *out__, tensor self, scalar other) {
+  PROTECT(
+    auto outputs__ = torch::ne(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne_(tensor *out__, tensor self, scalar other) {
+  PROTECT(
+    auto outputs__ = self->ne_(*other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
+  PROTECT(
+    auto outputs__ = torch::ne_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne_tensor(tensor *out__, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::ne(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne_tensor_(tensor *out__, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = self->ne_(*other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_ne_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::ne_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_neg(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::neg(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_neg_(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::neg_(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_neg_out(tensor *out__, tensor out, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::neg_out(*out, *self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_negative(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::negative(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_negative_(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::negative_(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_negative_out(tensor *out__, tensor out, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::negative_out(*out, *self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
+  PROTECT(
+    auto outputs__ = self->new_empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
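+// Note: factory-style methods such as new_empty encode a TensorOptions pair
+// as two plain ints from the caller: options_kind is cast to at::ScalarType
+// and options_device is decoded by device_of_int, a helper that presumably
+// lives in the hand-written part of the wrapper, so the foreign caller never
+// has to construct a TensorOptions value itself.
+int atg_new_empty_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int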
options_device) { + PROTECT( + auto outputs__ = self->new_empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) { + PROTECT( + auto outputs__ = self->new_full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_new_ones(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { + PROTECT( + auto outputs__ = self->new_ones(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { + PROTECT( + auto outputs__ = self->new_zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nextafter(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::nextafter(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nextafter_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->nextafter_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nextafter_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::nextafter_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { + PROTECT( + auto outputs__ = torch::nll_loss(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { + PROTECT( + auto outputs__ = torch::nll_loss2d(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { + PROTECT( + auto outputs__ = torch::nll_loss2d_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { + PROTECT( + auto outputs__ = torch::nll_loss2d_backward_out(*grad_input, *grad_output, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index, *total_weight); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { + PROTECT( + auto outputs__ = torch::nll_loss2d_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { + PROTECT( + auto outputs__ = torch::nll_loss_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { + PROTECT( + auto outputs__ = torch::nll_loss_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss_nd(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { + PROTECT( + auto outputs__ = torch::nll_loss_nd(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { + PROTECT( + auto outputs__ = torch::nll_loss_out(*out, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_nonzero(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::nonzero(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_nonzero_numpy(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::nonzero_numpy(*self);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
+    return 0;
+)
+return 1;
+}
+
+int atg_nonzero_out(tensor *out__, tensor out, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::nonzero_out(*out, *self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm(tensor *out__, tensor self) {
+  PROTECT(
+    auto outputs__ = torch::norm(*self);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_dtype_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) {
+  PROTECT(
+    auto outputs__ = torch::norm_except_dim(*v, pow, dim);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_scalaropt_dim(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+  PROTECT(
+    auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_scalaropt_dim_dtype(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_norm_scalaropt_dtype(tensor *out__, tensor self, scalar p, int dtype) {
+  PROTECT(
+    auto outputs__ = torch::norm(*self, *p, torch::ScalarType(dtype));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_normal(tensor *out__, tensor out, tensor mean, double std) {
+  PROTECT(
+    auto outputs__ = torch::normal_out(*out, *mean, std);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_normal_(tensor *out__, tensor self, double mean, double std) {
+  PROTECT(
+    auto outputs__ = self->normal_(mean, std);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
+int atg_normal_float_float_out(tensor *out__, tensor out, double mean, double std, int64_t *size_data, int size_len) {
+  PROTECT(
+    auto outputs__ = torch::normal_out(*out, mean, std, torch::IntArrayRef(size_data, size_len));
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+)
+return 1;
+}
+
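+// Note: where ATen overloads a single name several times (here
+// torch::normal_out with scalar or tensor mean/std), the generator emits one
+// C entry point per overload and disambiguates it with the overload name
+// taken from Descriptions.yaml, e.g. _float_float_out, _float_tensor_out and
+// _tensor_tensor_out below.
+int atg_normal_float_tensor_out(tensor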
*out__, tensor out, double mean, tensor std) { + PROTECT( + auto outputs__ = torch::normal_out(*out, mean, *std); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_normal_tensor_tensor_out(tensor *out__, tensor out, tensor mean, tensor std) { + PROTECT( + auto outputs__ = torch::normal_out(*out, *mean, *std); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::not_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->not_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::not_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal_tensor(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::not_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal_tensor_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->not_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_not_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::not_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) { + PROTECT( + auto outputs__ = torch::nuclear_norm(*self, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nuclear_norm_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::nuclear_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nuclear_norm_dim_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::nuclear_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) { + PROTECT( + auto outputs__ = torch::nuclear_norm_out(*out, *self, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_numpy_t(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->numpy_T(); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) { + PROTECT( + auto outputs__ = torch::one_hot(*self, num_classes); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::ones(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ones_like(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = 
torch::ones_like(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { + PROTECT( + auto outputs__ = torch::ones_out(*out, torch::IntArrayRef(size_data, size_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_orgqr(tensor *out__, tensor self, tensor input2) { + PROTECT( + auto outputs__ = torch::orgqr(*self, *input2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) { + PROTECT( + auto outputs__ = torch::orgqr_out(*out, *self, *input2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left, int transpose) { + PROTECT( + auto outputs__ = torch::ormqr(*self, *input2, *input3, (bool)left, (bool)transpose); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose) { + PROTECT( + auto outputs__ = torch::ormqr_out(*out, *self, *input2, *input3, (bool)left, (bool)transpose); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_outer(tensor *out__, tensor self, tensor vec2) { + PROTECT( + auto outputs__ = torch::outer(*self, *vec2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_outer_out(tensor *out__, tensor out, tensor self, tensor vec2) { + PROTECT( + auto outputs__ = torch::outer_out(*out, *self, *vec2); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pad_sequence(tensor *out__, tensor *sequences_data, int sequences_len, int batch_first, double padding_value) { + PROTECT( + auto outputs__ = torch::pad_sequence(of_carray_tensor(sequences_data, sequences_len), (bool)batch_first, padding_value); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) { + PROTECT( + auto outputs__ = torch::pairwise_distance(*x1, *x2, p, eps, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pdist(tensor *out__, tensor self, double p) { + PROTECT( + auto outputs__ = torch::pdist(*self, p); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { + PROTECT( + auto outputs__ = torch::permute(*self, torch::IntArrayRef(dims_data, dims_len)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pin_memory(tensor *out__, tensor self, int device) { + PROTECT( + auto outputs__ = self->pin_memory(device_of_int(device)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pinverse(tensor *out__, tensor self, double rcond) { + PROTECT( + auto outputs__ = torch::pinverse(*self, rcond); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) { + PROTECT( + auto outputs__ = torch::pixel_shuffle(*self, upscale_factor); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pixel_unshuffle(tensor *out__, tensor self, int64_t downscale_factor) { + PROTECT( + auto outputs__ = torch::pixel_unshuffle(*self, downscale_factor); + 
out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_poisson(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::poisson(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction) { + PROTECT( + auto outputs__ = torch::poisson_nll_loss(*input, *target, (bool)log_input, (bool)full, eps, reduction); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_polar(tensor *out__, tensor abs, tensor angle) { + PROTECT( + auto outputs__ = torch::polar(*abs, *angle); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_polar_out(tensor *out__, tensor out, tensor abs, tensor angle) { + PROTECT( + auto outputs__ = torch::polar_out(*out, *abs, *angle); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_polygamma(tensor *out__, int64_t n, tensor self) { + PROTECT( + auto outputs__ = torch::polygamma(n, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_polygamma_(tensor *out__, tensor self, int64_t n) { + PROTECT( + auto outputs__ = self->polygamma_(n); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) { + PROTECT( + auto outputs__ = torch::polygamma_out(*out, n, *self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_positive(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::positive(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow(tensor *out__, tensor self, tensor exponent) { + PROTECT( + auto outputs__ = torch::pow(*self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_(tensor *out__, tensor self, scalar exponent) { + PROTECT( + auto outputs__ = self->pow_(*exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_scalar(tensor *out__, scalar self, tensor exponent) { + PROTECT( + auto outputs__ = torch::pow(*self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_scalar_out(tensor *out__, tensor out, scalar self, tensor exponent) { + PROTECT( + auto outputs__ = torch::pow_out(*out, *self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_tensor_(tensor *out__, tensor self, tensor exponent) { + PROTECT( + auto outputs__ = self->pow_(*exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_tensor_scalar(tensor *out__, tensor self, scalar exponent) { + PROTECT( + auto outputs__ = torch::pow(*self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar exponent) { + PROTECT( + auto outputs__ = torch::pow_out(*out, *self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_pow_tensor_tensor_out(tensor *out__, tensor out, tensor self, tensor exponent) { + PROTECT( + auto outputs__ = torch::pow_out(*out, *self, *exponent); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_prelu(tensor *out__, tensor self, tensor weight) { + PROTECT( + auto outputs__ = torch::prelu(*self, *weight); + out__[0] 
= new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor weight) { + PROTECT( + auto outputs__ = torch::prelu_backward(*grad_output, *self, *weight); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_prod(tensor *out__, tensor self, int dtype) { + PROTECT( + auto outputs__ = torch::prod(*self, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_prod_dim_int(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::prod(*self, dim, (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_prod_int_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_put(tensor *out__, tensor self, tensor index, tensor source, int accumulate) { + PROTECT( + auto outputs__ = torch::put(*self, *index, *source, (bool)accumulate); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumulate) { + PROTECT( + auto outputs__ = self->put_(*index, *source, (bool)accumulate); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_q_per_channel_scales(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::q_per_channel_scales(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_q_per_channel_zero_points(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::q_per_channel_zero_points(*self); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_qr(tensor *out__, tensor self, int some) { + PROTECT( + auto outputs__ = torch::qr(*self, (bool)some); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_qr_q(tensor *out__, tensor Q, tensor R, tensor self, int some) { + PROTECT( + auto outputs__ = torch::qr_out(*Q, *R, *self, (bool)some); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + return 0; +) +return 1; +} + +int atg_quantile(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile(*self, *q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_new(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::quantile(*self, *q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, *q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = 
torch::quantile(*self, q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, q, dim, (bool)keepdim, std::string(interpolation)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, *q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile(*self, q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, q, dim, (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) { + PROTECT( + auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, torch::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + return 0; +) +return 1; +} + +int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t zero_point, int dtype) { + PROTECT( + auto outputs__ = torch::quantize_per_tensor(*self, scale, zero_point, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_matrix_rank1(tensor *out__, tensor self, double tol, int symmetric) { +int atg_quantize_per_tensor_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, int dtype) { PROTECT( - auto outputs__ = torch::matrix_rank(*self, tol, (bool)symmetric); + auto outputs__ = torch::quantize_per_tensor(*self, *scale, *zero_point, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max(tensor *out__, tensor self) { +int atg_quantize_per_tensor_tensors(tensor *out__, tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype) { PROTECT( - auto outputs__ = torch::max(*self); + auto outputs__ = torch::quantize_per_tensor(of_carray_tensor(tensors_data, tensors_len), *scales, *zero_points, torch::ScalarType(dtype)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; + return 0; +) +return 1; +} + +int atg_quantized_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point) { + PROTECT( + auto outputs__ = torch::quantized_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? 
*bias : torch::Tensor()), *mean, *var, eps, output_scale, output_zero_point); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max1(tensor *out__, tensor self, tensor other) { +int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( - auto outputs__ = torch::max(*self, *other); + auto outputs__ = torch::quantized_gru_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max2(tensor *out__, tensor self, int64_t dim, int keepdim) { +int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( - auto outputs__ = torch::max(*self, dim, (bool)keepdim); + auto outputs__ = torch::quantized_lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; @@ -4552,3060 +12336,3054 @@ int atg_max2(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg_quantized_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( - auto outputs__ = torch::max_out(*out, *self, *other); + auto outputs__ = torch::quantized_max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_out1(tensor *out__, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim) { +int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( - auto outputs__ = torch::max_out(*max, *max_values, *self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::quantized_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int 
atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( - auto outputs__ = torch::max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + auto outputs__ = torch::quantized_rnn_relu_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( - auto outputs__ = torch::max_pool1d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::quantized_rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_rad2deg(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + auto outputs__ = torch::rad2deg(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_rad2deg_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::max_pool2d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::rad2deg_(*self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, 
int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +int atg_rad2deg_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::max_pool2d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + auto outputs__ = torch::rad2deg_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool2d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::max_pool2d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + auto outputs__ = torch::rand(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_rand_like(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::max_pool2d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::rand_like(*self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( - auto outputs__ = torch::max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + auto outputs__ = torch::rand_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int 
size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::max_pool3d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::randint(high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +int atg_randint_like(tensor *out__, tensor self, int64_t high) { PROTECT( - auto outputs__ = torch::max_pool3d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + auto outputs__ = torch::randint_like(*self, high); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool3d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +int atg_randint_like_low_dtype(tensor *out__, tensor self, int64_t low, int64_t high) { PROTECT( - auto outputs__ = torch::max_pool3d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); + auto outputs__ = torch::randint_like(*self, low, high); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +int atg_randint_low(tensor *out__, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::max_pool3d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::randint(low, high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +int atg_randint_low_out(tensor *out__, tensor out, int64_t low, 
int64_t high, int64_t *size_data, int size_len) { PROTECT( - auto outputs__ = torch::max_unpool2d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::randint_out(*out, low, high, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data, int size_len) { PROTECT( - auto outputs__ = torch::max_unpool2d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::randint_out(*out, high, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::max_unpool2d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::randn(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +int atg_randn_like(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::max_unpool2d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::randn_like(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( - auto outputs__ = torch::max_unpool3d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + auto outputs__ = torch::randn_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +int atg_random_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::max_unpool3d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + auto outputs__ = self->random_(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +int atg_random_from_(tensor *out__, tensor self, int64_t from, int64_t to) { PROTECT( - auto outputs__ = 
torch::max_unpool3d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + auto outputs__ = self->random_(from, to); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +int atg_random_to_(tensor *out__, tensor self, int64_t to) { PROTECT( - auto outputs__ = torch::max_unpool3d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + auto outputs__ = self->random_(to); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_max_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::max_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::randperm(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_mean(tensor *out__, tensor self, int dtype) { +int atg_randperm_out(tensor *out__, tensor out, int64_t n) { PROTECT( - auto outputs__ = torch::mean(*self, torch::ScalarType(dtype)); + auto outputs__ = torch::randperm_out(*out, n); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) { PROTECT( - auto outputs__ = torch::mean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); + auto outputs__ = torch::range_out(*out, *start, *end); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_median(tensor *out__, tensor self) { +int atg_range_step(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::median(*self); + auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_median1(tensor *out__, tensor self, int64_t dim, int keepdim) { +int atg_ravel(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::median(*self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::ravel(*self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_median_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t 
dim, int keepdim) { +int atg_real(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::median_out(*values, *indices, *self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::real(*self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) { +int atg_reciprocal(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len)); - int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); - for (int i = 0; i < sz; ++i) - out__[i] = new torch::Tensor(outputs__[i]); - out__[sz] = nullptr; - // return out__; + auto outputs__ = torch::reciprocal(*self); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min(tensor *out__, tensor self) { +int atg_reciprocal_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::min(*self); + auto outputs__ = torch::reciprocal_(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min1(tensor *out__, tensor self, tensor other) { +int atg_reciprocal_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::min(*self, *other); + auto outputs__ = torch::reciprocal_out(*out, *self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min2(tensor *out__, tensor self, int64_t dim, int keepdim) { +int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::min(*self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::reflection_pad1d(*self, torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) { +int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::min_out(*out, *self, *other); + auto outputs__ = torch::reflection_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min_out1(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) { +int atg_reflection_pad1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::min_out(*min, *min_indices, *self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::reflection_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_min_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::min_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::reflection_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new 
torch::Tensor(outputs__); return 0; ) return 1; } -int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { +int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::miopen_batch_norm(*input, *weight, (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::reflection_pad2d(*self, torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon) { +int atg_reflection_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::miopen_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? *save_var : torch::Tensor()), epsilon); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::reflection_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +int atg_reflection_pad2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::miopen_convolution(*self, *weight, (bias ? 
-int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_reflection_pad2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::reflection_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) {
+int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_backward_bias(*grad_output);
+    auto outputs__ = torch::reflection_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_reflection_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::reflection_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_reflection_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::reflection_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_reflection_pad3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::reflection_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_reflection_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::reflection_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_relu(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::miopen_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::relu(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

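+// Nullable tensor arguments such as `bias` above use a null pointer to mean
+// "absent": the wrappers expand to `(bias ? *bias : torch::Tensor())`, so a
+// default-constructed (undefined) Tensor stands in for None on the C++ side.
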
-int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_relu6(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::miopen_depthwise_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::relu6(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_relu6_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::miopen_depthwise_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::relu6_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
+int atg_relu_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::miopen_depthwise_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
+    auto outputs__ = torch::relu_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) {
+int atg_remainder(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::miopen_rnn(*input, of_carray_tensor(weight_data, weight_len), weight_stride0, *hx, (cx ? *cx : torch::Tensor()), mode, hidden_size, num_layers, (bool)batch_first, dropout, (bool)train, (bool)bidirectional, torch::IntArrayRef(batch_sizes_data, batch_sizes_len), (dropout_state ? *dropout_state : torch::Tensor()));
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
-    out__[3] = new torch::Tensor(std::get<3>(outputs__));
-    out__[4] = new torch::Tensor(std::get<4>(outputs__));
+    auto outputs__ = torch::remainder(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
+int atg_remainder_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
+    auto outputs__ = self->remainder_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) {
+int atg_remainder_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups);
+    auto outputs__ = torch::remainder_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) {
+int atg_remainder_scalar_tensor(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined);
+    auto outputs__ = torch::remainder(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) {
+int atg_remainder_tensor(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_convolution_backward_weights(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::remainder(*self, *other);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

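+// Scalar arguments are passed through the `scalar` typedef (a pointer to
+// torch::Scalar in the hand-written header) and dereferenced at the call
+// site, letting one C entry point accept ints, floats, etc. For example, a
+// hypothetical caller computing `self % 2`:
+//   torch::Scalar two(2);
+//   atg_remainder(outs, self, &two);
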
-int atg_mkldnn_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
+int atg_remainder_tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_linear(*input, *weight, (bias ? *bias : torch::Tensor()));
+    auto outputs__ = self->remainder_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
+int atg_remainder_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
+  PROTECT(
+    auto outputs__ = torch::remainder_out(*out, *self, *other);
+    out__[0] = new torch::Tensor(outputs__);
+    return 0;
+  )
+  return 1;
+}
+
+int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
+    auto outputs__ = torch::renorm(*self, *p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) {
+int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
-    auto outputs__ = torch::mkldnn_reorder_conv2d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups);
+    auto outputs__ = self->renorm_(*p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mm(tensor *out__, tensor self, tensor mat2) {
+int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
-    auto outputs__ = torch::mm(*self, *mat2);
+    auto outputs__ = torch::renorm_out(*out, *self, *p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) {
+int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_len) {
   PROTECT(
-    auto outputs__ = torch::mm_out(*out, *self, *mat2);
+    auto outputs__ = self->repeat(torch::IntArrayRef(repeats_data, repeats_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) {
+int atg_repeat_interleave(tensor *out__, tensor repeats, int64_t output_size) {
   PROTECT(
-    auto outputs__ = torch::mode(*self, dim, (bool)keepdim);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::repeat_interleave(*repeats, output_size);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mode_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) {
+int atg_repeat_interleave_self_int(tensor *out__, tensor self, int64_t repeats, int64_t dim, int64_t output_size) {
   PROTECT(
-    auto outputs__ = torch::mode_out(*values, *indices, *self, dim, (bool)keepdim);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::repeat_interleave(*self, repeats, dim, output_size);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_repeat_interleave_self_tensor(tensor *out__, tensor self, tensor repeats, int64_t dim, int64_t output_size) {
   PROTECT(
-    auto outputs__ = torch::mse_loss(*self, *target, reduction);
+    auto outputs__ = torch::repeat_interleave(*self, *repeats, dim, output_size);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mse_loss_backward(*grad_output, *self, *target, reduction);
+    auto outputs__ = torch::replication_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mse_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mse_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
+    auto outputs__ = torch::replication_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+int atg_replication_pad1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mse_loss_out(*out, *self, *target, reduction);
+    auto outputs__ = torch::replication_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mul(tensor *out__, tensor self, tensor other) {
+int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mul(*self, *other);
+    auto outputs__ = torch::replication_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mul1(tensor *out__, tensor self, scalar other) {
+int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mul(*self, *other);
+    auto outputs__ = torch::replication_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mul_(tensor *out__, tensor self, tensor other) {
+int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = self->mul_(*other);
+    auto outputs__ = torch::replication_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mul_1(tensor *out__, tensor self, scalar other) {
+int atg_replication_pad2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = self->mul_(*other);
+    auto outputs__ = torch::replication_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::mul_out(*out, *self, *other);
+    auto outputs__ = torch::replication_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
+int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::multi_margin_loss_backward(*grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction);
+    auto outputs__ = torch::replication_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multi_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
+int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::multi_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction);
+    auto outputs__ = torch::replication_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_replication_pad3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::multilabel_margin_loss(*self, *target, reduction);
+    auto outputs__ = torch::replication_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
+int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
-    auto outputs__ = torch::multilabel_margin_loss_backward(*grad_output, *self, *target, reduction, *is_target);
+    auto outputs__ = torch::replication_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multilabel_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
+int atg_requires_grad_(tensor *out__, tensor self, int requires_grad) {
   PROTECT(
-    auto outputs__ = torch::multilabel_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, *is_target);
+    auto outputs__ = self->requires_grad_((bool)requires_grad);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
   PROTECT(
-    auto outputs__ = torch::multilabel_margin_loss_out(*out, *self, *target, reduction);
+    auto outputs__ = torch::reshape(*self, torch::IntArrayRef(shape_data, shape_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replacement) {
+int atg_reshape_as(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::multinomial(*self, num_samples, (bool)replacement);
+    auto outputs__ = self->reshape_as(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samples, int replacement) {
+int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
-    auto outputs__ = torch::multinomial_out(*out, *self, num_samples, (bool)replacement);
+    auto outputs__ = self->resize_(torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mv(tensor *out__, tensor self, tensor vec) {
+int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
   PROTECT(
-    auto outputs__ = torch::mv(*self, *vec);
+    auto outputs__ = torch::resize_as_(*self, *the_template);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) {
+int atg_resize_as_sparse_(tensor *out__, tensor self, tensor the_template) {
   PROTECT(
-    auto outputs__ = torch::mv_out(*out, *self, *vec);
+    auto outputs__ = torch::resize_as_sparse_(*self, *the_template);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mvlgamma(tensor *out__, tensor self, int64_t p) {
+int atg_resolve_conj(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::mvlgamma(*self, p);
+    auto outputs__ = torch::resolve_conj(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_mvlgamma_(tensor *out__, tensor self, int64_t p) {
+int atg_resolve_neg(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->mvlgamma_(p);
+    auto outputs__ = torch::resolve_neg(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
+int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
   PROTECT(
-    auto outputs__ = torch::narrow(*self, dim, start, length);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::rnn_relu(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

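+// When the underlying C++ function returns a tuple, the wrapper unpacks it
+// with std::get into consecutive out__ slots; the caller must size out__
+// accordingly (two slots for rnn_relu: the output and the final hidden
+// state).
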
-int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
+int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
   PROTECT(
-    auto outputs__ = self->narrow_copy(dim, start, length);
+    auto outputs__ = torch::rnn_relu_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) {
+int atg_rnn_relu_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
   PROTECT(
-    auto outputs__ = torch::native_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps);
+    auto outputs__ = torch::rnn_relu(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_native_layer_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps) {
+int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
   PROTECT(
-    auto outputs__ = torch::native_layer_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), M, n, eps);
+    auto outputs__ = torch::rnn_tanh(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_native_norm(tensor *out__, tensor self) {
+int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
   PROTECT(
-    auto outputs__ = torch::native_norm(*self);
+    auto outputs__ = torch::rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne(tensor *out__, tensor self, scalar other) {
+int atg_rnn_tanh_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
   PROTECT(
-    auto outputs__ = torch::ne(*self, *other);
+    auto outputs__ = torch::rnn_tanh(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    return 0;
+  )
+  return 1;
+}
+
+int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len) {
+  PROTECT(
+    auto outputs__ = torch::roll(*self, torch::IntArrayRef(shifts_data, shifts_len), torch::IntArrayRef(dims_data, dims_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne1(tensor *out__, tensor self, tensor other) {
+int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dims_len) {
   PROTECT(
-    auto outputs__ = torch::ne(*self, *other);
+    auto outputs__ = torch::rot90(*self, k, torch::IntArrayRef(dims_data, dims_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne_(tensor *out__, tensor self, scalar other) {
+int atg_round(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->ne_(*other);
+    auto outputs__ = torch::round(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne_1(tensor *out__, tensor self, tensor other) {
+int atg_round_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->ne_(*other);
+    auto outputs__ = torch::round_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_round_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::ne_out(*out, *self, *other);
+    auto outputs__ = torch::round_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ne_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_row_stack(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::ne_out(*out, *self, *other);
+    auto outputs__ = torch::row_stack(of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_neg(tensor *out__, tensor self) {
+int atg_row_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::neg(*self);
+    auto outputs__ = torch::row_stack_out(*out, of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_neg_(tensor *out__, tensor self) {
+int atg_rrelu(tensor *out__, tensor self, int training) {
   PROTECT(
-    auto outputs__ = torch::neg_(*self);
+    auto outputs__ = torch::rrelu(*self, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_neg_out(tensor *out__, tensor out, tensor self) {
+int atg_rrelu_(tensor *out__, tensor self, int training) {
   PROTECT(
-    auto outputs__ = torch::neg_out(*out, *self);
+    auto outputs__ = torch::rrelu_(*self, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training) {
   PROTECT(
-    auto outputs__ = self->new_empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::rrelu_with_noise(*self, *noise, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) {
+int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training) {
   PROTECT(
-    auto outputs__ = self->new_full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::rrelu_with_noise_(*self, *noise, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_rrelu_with_noise_backward(tensor *out__, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training, int self_is_result) {
   PROTECT(
-    auto outputs__ = self->new_zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::rrelu_with_noise_backward(*grad_output, *self, *noise, *lower, *upper, (bool)training, (bool)self_is_result);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor noise, int training) {
   PROTECT(
-    auto outputs__ = torch::nll_loss(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
+    auto outputs__ = torch::rrelu_with_noise_out(*out, *self, *noise, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+int atg_rsqrt(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::nll_loss2d(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
+    auto outputs__ = torch::rsqrt(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+int atg_rsqrt_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::nll_loss2d_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
+    auto outputs__ = torch::rsqrt_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

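+// Tensor-creation options are flattened into two ints: options_kind is cast
+// to an at::ScalarType and options_device goes through device_of_int (a
+// helper from the hand-written wrapper), combined as in the removed
+// atg_new_* functions above:
+//   at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))
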
-int atg_nll_loss2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+int atg_rsqrt_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::nll_loss2d_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
+    auto outputs__ = torch::rsqrt_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+int atg_rsub(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::nll_loss2d_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
+    auto outputs__ = torch::rsub(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+int atg_rsub_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::nll_loss_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
+    auto outputs__ = torch::rsub(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::nll_loss_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
+    auto outputs__ = torch::scalar_tensor(*s, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::nll_loss_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
+    auto outputs__ = torch::scatter(*self, dim, *index, *src);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nonzero(tensor *out__, tensor self) {
+int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::nonzero(*self);
+    auto outputs__ = self->scatter_(dim, *index, *src);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nonzero_numpy(tensor *out__, tensor self) {
+int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::nonzero_numpy(*self);
-    int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
-    for (int i = 0; i < sz; ++i)
-      out__[i] = new torch::Tensor(outputs__[i]);
-    out__[sz] = nullptr;
-    // return out__;
+    auto outputs__ = torch::scatter_add(*self, dim, *index, *src);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nonzero_out(tensor *out__, tensor out, tensor self) {
+int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::nonzero_out(*out, *self);
+    auto outputs__ = self->scatter_add_(dim, *index, *src);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm(tensor *out__, tensor self) {
+int atg_scatter_add_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::norm(*self);
+    auto outputs__ = torch::scatter_add_out(*out, *self, dim, *index, *src);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm1(tensor *out__, tensor self, scalar p, int dtype) {
+int atg_scatter_reduce(tensor *out__, tensor self, int64_t dim, tensor index, tensor src, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::norm(*self, *p, torch::ScalarType(dtype));
+    auto outputs__ = torch::scatter(*self, dim, *index, *src, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm2(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_scatter_reduce_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = self->scatter_(dim, *index, *src, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm3(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+int atg_scatter_reduce_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *src, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) {
+int atg_scatter_src_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src) {
   PROTECT(
-    auto outputs__ = torch::norm_except_dim(*v, pow, dim);
+    auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *src);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

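+// String arguments arrive as char* and are copied into std::string before
+// reaching libtorch, as in the scatter `reduce` overloads above, where the
+// string selects the reduction (e.g. "add" or "multiply") to apply.
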
-int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_scatter_value(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
   PROTECT(
-    auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = torch::scatter(*self, dim, *index, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_norm_out1(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+int atg_scatter_value_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
   PROTECT(
-    auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = self->scatter_(dim, *index, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_normal_(tensor *out__, tensor self, double mean, double std) {
+int atg_scatter_value_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, scalar value) {
   PROTECT(
-    auto outputs__ = self->normal_(mean, std);
+    auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_normal_out(tensor *out__, tensor out, tensor mean, double std) {
+int atg_scatter_value_reduce(tensor *out__, tensor self, int64_t dim, tensor index, scalar value, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::normal_out(*out, *mean, std);
+    auto outputs__ = torch::scatter(*self, dim, *index, *value, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_normal_out1(tensor *out__, tensor out, double mean, tensor std) {
+int atg_scatter_value_reduce_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::normal_out(*out, mean, *std);
+    auto outputs__ = self->scatter_(dim, *index, *value, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_normal_out2(tensor *out__, tensor out, tensor mean, tensor std) {
+int atg_scatter_value_reduce_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, scalar value, char * reduce) {
   PROTECT(
-    auto outputs__ = torch::normal_out(*out, *mean, *std);
+    auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *value, std::string(reduce));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_normal_out3(tensor *out__, tensor out, double mean, double std, int64_t *size_data, int size_len) {
+int atg_searchsorted(tensor *out__, tensor sorted_sequence, tensor self, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = torch::normal_out(*out, mean, std, torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) {
+int atg_searchsorted_scalar(tensor *out__, tensor sorted_sequence, scalar self, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = torch::nuclear_norm(*self, (bool)keepdim);
+    auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nuclear_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_searchsorted_tensor_out(tensor *out__, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right) {
   PROTECT(
-    auto outputs__ = torch::nuclear_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = torch::searchsorted_out(*out, *sorted_sequence, *self, (bool)out_int32, (bool)right);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) {
+int atg_segment_reduce(tensor *out__, tensor data, char * reduce, tensor lengths, tensor indices, int64_t axis, int unsafe, scalar initial) {
   PROTECT(
-    auto outputs__ = torch::nuclear_norm_out(*out, *self, (bool)keepdim);
+    auto outputs__ = torch::segment_reduce(*data, std::string(reduce), (lengths ? *lengths : torch::Tensor()), (indices ? *indices : torch::Tensor()), axis, (bool)unsafe, *initial);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_nuclear_norm_out1(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) {
   PROTECT(
-    auto outputs__ = torch::nuclear_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
+    auto outputs__ = torch::select(*self, dim, index);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_numpy_t(tensor *out__, tensor self) {
+int atg_select_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index) {
   PROTECT(
-    auto outputs__ = self->numpy_T();
+    auto outputs__ = torch::select_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, index);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) {
+int atg_selu(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::one_hot(*self, num_classes);
+    auto outputs__ = torch::selu(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_selu_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::ones(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::selu_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ones_like(tensor *out__, tensor self) {
+int atg_set_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::ones_like(*self);
+    auto outputs__ = self->set_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ones_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+int atg_set_requires_grad(tensor *out__, tensor self, int r) {
   PROTECT(
-    auto outputs__ = torch::ones_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = self->set_requires_grad((bool)r);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+int atg_set_source_tensor_(tensor *out__, tensor self, tensor source) {
   PROTECT(
-    auto outputs__ = torch::ones_out(*out, torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = self->set_(*source);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_orgqr(tensor *out__, tensor self, tensor input2) {
+int atg_sgn(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::orgqr(*self, *input2);
+    auto outputs__ = torch::sgn(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) {
+int atg_sgn_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::orgqr_out(*out, *self, *input2);
+    auto outputs__ = self->sgn_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left, int transpose) {
+int atg_sgn_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::ormqr(*self, *input2, *input3, (bool)left, (bool)transpose);
+    auto outputs__ = torch::sgn_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose) {
+int atg_sigmoid(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::ormqr_out(*out, *self, *input2, *input3, (bool)left, (bool)transpose);
+    auto outputs__ = torch::sigmoid(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) {
+int atg_sigmoid_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pairwise_distance(*x1, *x2, p, eps, (bool)keepdim);
+    auto outputs__ = torch::sigmoid_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pdist(tensor *out__, tensor self, double p) {
+int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) {
   PROTECT(
-    auto outputs__ = torch::pdist(*self, p);
+    auto outputs__ = torch::sigmoid_backward(*grad_output, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
+int atg_sigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
   PROTECT(
-    auto outputs__ = self->permute(torch::IntArrayRef(dims_data, dims_len));
+    auto outputs__ = torch::sigmoid_backward_out(*grad_input, *grad_output, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pin_memory(tensor *out__, tensor self) {
+int atg_sigmoid_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->pin_memory();
+    auto outputs__ = torch::sigmoid_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pinverse(tensor *out__, tensor self, double rcond) {
+int atg_sign(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pinverse(*self, rcond);
+    auto outputs__ = torch::sign(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) {
+int atg_sign_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pixel_shuffle(*self, upscale_factor);
+    auto outputs__ = self->sign_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_poisson(tensor *out__, tensor self) {
+int atg_sign_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::poisson(*self);
+    auto outputs__ = torch::sign_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction) {
+int atg_signbit(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::poisson_nll_loss(*input, *target, (bool)log_input, (bool)full, eps, reduction);
+    auto outputs__ = torch::signbit(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_polygamma(tensor *out__, int64_t n, tensor self) {
+int atg_signbit_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::polygamma(n, *self);
+    auto outputs__ = torch::signbit_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_polygamma_(tensor *out__, tensor self, int64_t n) {
+int atg_silu(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->polygamma_(n);
+    auto outputs__ = torch::silu(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
+int atg_silu_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::polygamma_out(*out, n, *self);
+    auto outputs__ = torch::silu_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow(tensor *out__, tensor self, scalar exponent) {
+int atg_silu_backward(tensor *out__, tensor grad_output, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow(*self, *exponent);
+    auto outputs__ = torch::silu_backward(*grad_output, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow1(tensor *out__, tensor self, tensor exponent) {
+int atg_silu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow(*self, *exponent);
+    auto outputs__ = torch::silu_backward_out(*grad_input, *grad_output, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow2(tensor *out__, scalar self, tensor exponent) {
+int atg_silu_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow(*self, *exponent);
+    auto outputs__ = torch::silu_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow_(tensor *out__, tensor self, scalar exponent) {
+int atg_sin(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->pow_(*exponent);
+    auto outputs__ = torch::sin(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow_1(tensor *out__, tensor self, tensor exponent) {
+int atg_sin_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->pow_(*exponent);
+    auto outputs__ = torch::sin_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow_out(tensor *out__, tensor out, tensor self, scalar exponent) {
+int atg_sin_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow_out(*out, *self, *exponent);
+    auto outputs__ = torch::sin_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow_out1(tensor *out__, tensor out, tensor self, tensor exponent) {
+int atg_sinc(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow_out(*out, *self, *exponent);
+    auto outputs__ = torch::sinc(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_pow_out2(tensor *out__, tensor out, scalar self, tensor exponent) {
+int atg_sinc_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::pow_out(*out, *self, *exponent);
+    auto outputs__ = torch::sinc_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_prelu(tensor *out__, tensor self, tensor weight) {
+int atg_sinc_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::prelu(*self, *weight);
+    auto outputs__ = torch::sinc_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor weight) {
+int atg_sinh(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::prelu_backward(*grad_output, *self, *weight);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::sinh(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_prod(tensor *out__, tensor self, int dtype) {
+int atg_sinh_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::prod(*self, torch::ScalarType(dtype));
+    auto outputs__ = torch::sinh_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_prod1(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) {
+int atg_sinh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::prod(*self, dim, (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = torch::sinh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_prod_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) {
+int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step) {
   PROTECT(
-    auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = torch::slice(*self, dim, start, end, step);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumulate) {
+int atg_slice_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step) {
   PROTECT(
-    auto outputs__ = self->put_(*index, *source, (bool)accumulate);
+    auto outputs__ = torch::slice_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, start, end, step);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_q_per_channel_scales(tensor *out__, tensor self) {
+int atg_slogdet(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::q_per_channel_scales(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::slogdet(*self);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_qr(tensor *out__, tensor self, int some) { +int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( - auto outputs__ = torch::qr(*self, (bool)some); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::slow_conv3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, int some) { +int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::qr_out(*Q, *R, *self, (bool)some); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::slow_conv_dilated2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) { +int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, torch::ScalarType(dtype)); + auto outputs__ = torch::slow_conv_dilated3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t zero_point, int dtype) { +int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::quantize_per_tensor(*self, scale, zero_point, torch::ScalarType(dtype)); + auto outputs__ = torch::slow_conv_transpose2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_quantized_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) { +int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::quantized_gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::slow_conv_transpose2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_quantized_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) { +int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( - auto outputs__ = torch::quantized_gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::slow_conv_transpose3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
   PROTECT(
-    auto outputs__ = torch::quantized_gru_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
+    auto outputs__ = torch::slow_conv_transpose3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic) {
+int atg_smm(tensor *out__, tensor self, tensor mat2) {
   PROTECT(
-    auto outputs__ = torch::quantized_lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first, torch::ScalarType(dtype), (bool)use_dynamic);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    auto outputs__ = torch::smm(*self, *mat2);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic) {
+int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction, double beta) {
   PROTECT(
-    auto outputs__ = torch::quantized_lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, torch::ScalarType(dtype), (bool)use_dynamic);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction, beta);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) {
   PROTECT(
-    auto outputs__ = torch::quantized_lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction, beta);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
+int atg_smooth_l1_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) {
   PROTECT(
-    auto outputs__ = torch::quantized_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
+    auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, beta);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction, double beta) {
   PROTECT(
-    auto outputs__ = torch::quantized_rnn_relu_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
+    auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction, beta);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::quantized_rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
+    auto outputs__ = torch::soft_margin_loss(*self, *target, reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::rand(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::soft_margin_loss_backward(*grad_output, *self, *target, reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rand_like(tensor *out__, tensor self) {
+int atg_soft_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::rand_like(*self);
+    auto outputs__ = torch::soft_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rand_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::rand_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::soft_margin_loss_out(*out, *self, *target, reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
   PROTECT(
-    auto outputs__ = torch::rand_out(*out, torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::softmax(*self, dim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_softplus(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::randint(high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::softplus(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint1(tensor *out__, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
   PROTECT(
-    auto outputs__ = torch::randint(low, high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_like(tensor *out__, tensor self, int64_t high) {
+int atg_softplus_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
   PROTECT(
-    auto outputs__ = torch::randint_like(*self, high);
+    auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_like1(tensor *out__, tensor self, int64_t low, int64_t high) {
+int atg_softplus_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::randint_like(*self, low, high);
+    auto outputs__ = torch::softplus_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_like2(tensor *out__, tensor self, int64_t high, int options_kind, int options_device) {
+int atg_softshrink(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::randint_like(*self, high, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::softshrink(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_like3(tensor *out__, tensor self, int64_t low, int64_t high, int options_kind, int options_device) {
+int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scalar lambd) {
   PROTECT(
-    auto outputs__ = torch::randint_like(*self, low, high, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::softshrink_backward(*grad_output, *self, *lambd);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data, int size_len) {
+int atg_softshrink_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar lambd) {
   PROTECT(
-    auto outputs__ = torch::randint_out(*out, high, torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::softshrink_backward_out(*grad_input, *grad_output, *self, *lambd);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randint_out1(tensor *out__, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len) {
+int atg_softshrink_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::randint_out(*out, low, high, torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::softshrink_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_solve(tensor *out__, tensor self, tensor A) {
   PROTECT(
-    auto outputs__ = torch::randn(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::solve(*self, *A);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_randn_like(tensor *out__, tensor self) {
+int atg_solve_solution(tensor *out__, tensor solution, tensor lu, tensor self, tensor A) {
   PROTECT(
-    auto outputs__ = torch::randn_like(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::solve_out(*solution, *lu, *self, *A);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_randn_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) {
   PROTECT(
-    auto outputs__ = torch::randn_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::sort(*self, dim, (bool)descending);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+int atg_sort_stable(tensor *out__, tensor self, int stable, int64_t dim, int descending) {
   PROTECT(
-    auto outputs__ = torch::randn_out(*out, torch::IntArrayRef(size_data, size_len));
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::sort(*self, (bool)stable, dim, (bool)descending);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_random_(tensor *out__, tensor self) {
+int atg_sort_values(tensor 
*out__, tensor values, tensor indices, tensor self, int64_t dim, int descending) {
   PROTECT(
-    auto outputs__ = self->random_();
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::sort_out(*values, *indices, *self, dim, (bool)descending);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_random_1(tensor *out__, tensor self, int64_t to) {
+int atg_sort_values_stable(tensor *out__, tensor values, tensor indices, tensor self, int stable, int64_t dim, int descending) {
   PROTECT(
-    auto outputs__ = self->random_(to);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::sort_out(*values, *indices, *self, (bool)stable, dim, (bool)descending);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }
-int atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to) {
+int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = self->random_(from, to);
+    auto outputs__ = torch::sparse_coo_tensor(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device) {
+int atg_sparse_coo_tensor_indices(tensor *out__, tensor indices, tensor values, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::randperm(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::sparse_coo_tensor(*indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_randperm_out(tensor *out__, tensor out, int64_t n) {
+int atg_sparse_coo_tensor_indices_size(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::randperm_out(*out, n);
+    auto outputs__ = torch::sparse_coo_tensor(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
+int atg_sparse_csr_tensor(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::sparse_csr_tensor(*crow_indices, *col_indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_range1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
+int atg_sparse_csr_tensor_crow_col_value_size(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::sparse_csr_tensor(*crow_indices, *col_indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) {
+int atg_sparse_mask(tensor *out__, tensor self, tensor mask) {
   PROTECT(
-    auto outputs__ = torch::range_out(*out, *start, *end);
+    auto outputs__ = self->sparse_mask(*mask);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_real(tensor *out__, tensor self) {
+int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
   PROTECT(
-    auto outputs__ = torch::real(*self);
+    auto outputs__ = self->sparse_resize_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_real_out(tensor *out__, tensor out, tensor self) {
+int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
   PROTECT(
-    auto outputs__ = torch::real_out(*out, *self);
+    auto outputs__ = self->sparse_resize_and_clear_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reciprocal(tensor *out__, tensor self) {
+int atg_special_digamma(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reciprocal(*self);
+    auto outputs__ = torch::special_digamma(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reciprocal_(tensor *out__, tensor self) {
+int atg_special_digamma_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reciprocal_(*self);
+    auto outputs__ = torch::special_digamma_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reciprocal_out(tensor *out__, tensor out, tensor self) {
+int atg_special_entr(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reciprocal_out(*out, *self);
+    auto outputs__ = torch::special_entr(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_entr_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_entr_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erf(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erf(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erf_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erf_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erfc(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erfc(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erfc_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erfc_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erfcx(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erfcx(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erfcx_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erfcx_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_erfinv(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::reflection_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_erfinv(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_relu(tensor *out__, tensor self) {
+int atg_special_erfinv_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::relu(*self);
+    auto outputs__ = torch::special_erfinv_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_relu_(tensor *out__, tensor self) {
+int atg_special_exp2(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::relu_(*self);
+    auto outputs__ = torch::special_exp2(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder(tensor *out__, tensor self, scalar other) {
+int atg_special_exp2_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::remainder(*self, *other);
+    auto outputs__ = torch::special_exp2_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder1(tensor *out__, tensor self, tensor other) {
+int atg_special_expit(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::remainder(*self, *other);
+    auto outputs__ = torch::special_expit(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder_(tensor *out__, tensor self, scalar other) {
+int atg_special_expit_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->remainder_(*other);
+    auto outputs__ = torch::special_expit_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder_1(tensor *out__, tensor self, tensor other) {
+int atg_special_expm1(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->remainder_(*other);
+    auto outputs__ = torch::special_expm1(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder_out(tensor *out__, tensor out, tensor self, scalar other) {
+int atg_special_expm1_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::remainder_out(*out, *self, *other);
+    auto outputs__ = torch::special_expm1_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_remainder_out1(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_special_gammainc(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::remainder_out(*out, *self, *other);
+    auto outputs__ = torch::special_gammainc(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+int atg_special_gammainc_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::renorm(*self, *p, dim, *maxnorm);
+    auto outputs__ = torch::special_gammainc_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+int atg_special_gammaincc(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = self->renorm_(*p, dim, *maxnorm);
+    auto outputs__ = torch::special_gammaincc(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+int atg_special_gammaincc_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::renorm_out(*out, *self, *p, dim, *maxnorm);
+    auto outputs__ = torch::special_gammaincc_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_len) {
+int atg_special_gammaln(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->repeat(torch::IntArrayRef(repeats_data, repeats_len));
+    auto outputs__ = torch::special_gammaln(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_repeat_interleave(tensor *out__, tensor repeats) {
+int atg_special_gammaln_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::repeat_interleave(*repeats);
+    auto outputs__ = torch::special_gammaln_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t dim) {
+int atg_special_i0(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::repeat_interleave(*self, *repeats, dim);
+    auto outputs__ = torch::special_i0(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t dim) {
+int atg_special_i0_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::repeat_interleave(*self, repeats, dim);
+    auto outputs__ = torch::special_i0_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i0e(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = 
torch::replication_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i0e(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i0e_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i0e_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i1(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i1(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i1_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i1_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i1e(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i1e(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_i1e_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_i1e_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_log1p(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_log1p(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_log1p_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::replication_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_log1p_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
   PROTECT(
-    auto outputs__ = torch::replication_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_log_softmax(*self, dim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_logit(tensor *out__, tensor self, double eps) {
   PROTECT(
-    auto outputs__ = torch::replication_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_logit(*self, eps);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_logit_out(tensor *out__, tensor out, tensor self, double eps) {
   PROTECT(
-    auto outputs__ = torch::replication_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_logit_out(*out, *self, eps);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+int atg_special_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::replication_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::special_logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_requires_grad_(tensor *out__, tensor self, int _requires_grad) {
+int atg_special_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
   PROTECT(
-    auto outputs__ = self->requires_grad_((bool)_requires_grad);
+    auto outputs__ = torch::special_logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
+int atg_special_multigammaln(tensor *out__, tensor self, int64_t p) {
   PROTECT(
-    auto outputs__ = torch::reshape(*self, torch::IntArrayRef(shape_data, shape_len));
+    auto outputs__ = torch::special_multigammaln(*self, p);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_reshape_as(tensor *out__, tensor self, tensor other) {
+int atg_special_multigammaln_out(tensor *out__, tensor out, tensor self, int64_t p) {
   PROTECT(
-    auto outputs__ = self->reshape_as(*other);
+    auto outputs__ = torch::special_multigammaln_out(*out, *self, p);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
+int atg_special_ndtr(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->resize_(torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::special_ndtr(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
+int atg_special_ndtr_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::resize_as_(*self, *the_template);
+    auto outputs__ = torch::special_ndtr_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided) {
+int atg_special_ndtri(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rfft(*self, signal_ndim, (bool)normalized, (bool)onesided);
+    auto outputs__ = torch::special_ndtri(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+int atg_special_ndtri_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_relu(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::special_ndtri_out(*out, *self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_relu1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+int atg_special_polygamma(tensor *out__, int64_t n, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_relu(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::special_polygamma(n, *self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+int atg_special_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_relu_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
+    auto outputs__ = torch::special_polygamma_out(*out, n, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+int atg_special_psi(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_tanh(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::special_psi(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_tanh1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+int atg_special_psi_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_tanh(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::special_psi_out(*out, *self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+int atg_special_round(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? 
*b_hh : torch::Tensor()));
+    auto outputs__ = torch::special_round(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len) {
+int atg_special_round_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::roll(*self, torch::IntArrayRef(shifts_data, shifts_len), torch::IntArrayRef(dims_data, dims_len));
+    auto outputs__ = torch::special_round_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dims_len) {
+int atg_special_sinc(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::rot90(*self, k, torch::IntArrayRef(dims_data, dims_len));
+    auto outputs__ = torch::special_sinc(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_round(tensor *out__, tensor self) {
+int atg_special_sinc_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::round(*self);
+    auto outputs__ = torch::special_sinc_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_round_(tensor *out__, tensor self) {
+int atg_special_xlog1py(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::round_(*self);
+    auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_round_out(tensor *out__, tensor out, tensor self) {
+int atg_special_xlog1py_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::round_out(*out, *self);
+    auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu(tensor *out__, tensor self, int training) {
+int atg_special_xlog1py_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::rrelu(*self, (bool)training);
+    auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_(tensor *out__, tensor self, int training) {
+int atg_special_xlog1py_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_(*self, (bool)training);
+    auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training) {
+int atg_special_xlog1py_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_with_noise(*self, *noise, (bool)training);
+    auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training) {
+int atg_special_xlog1py_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_with_noise_(*self, *noise, (bool)training);
+    auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_with_noise_backward(tensor *out__, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
+int atg_special_xlogy(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_with_noise_backward(*grad_output, *self, *noise, *lower, *upper, (bool)training);
+    auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_with_noise_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
+int atg_special_xlogy_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_with_noise_backward_out(*grad_input, *grad_output, *self, *noise, *lower, *upper, (bool)training);
+    auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor noise, int training) {
+int atg_special_xlogy_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::rrelu_with_noise_out(*out, *self, *noise, (bool)training);
+    auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rsqrt(tensor *out__, tensor self) {
+int atg_special_xlogy_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rsqrt(*self);
+    auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rsqrt_(tensor *out__, tensor self) {
+int atg_special_xlogy_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rsqrt_(*self);
+    auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rsqrt_out(tensor *out__, tensor out, tensor self) {
+int atg_special_xlogy_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rsqrt_out(*out, *self);
+    auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rsub(tensor *out__, tensor self, tensor other) {
+int atg_special_zeta(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::rsub(*self, *other);
+    auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_rsub1(tensor *out__, tensor self, scalar other) {
+int atg_special_zeta_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::rsub(*self, *other);
+    auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_device) {
+int atg_special_zeta_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::scalar_tensor(*s, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+int atg_special_zeta_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::scatter(*self, dim, *index, *src);
+    auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_scatter1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+int atg_special_zeta_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::scatter(*self, dim, *index, *value);
+    auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+int atg_special_zeta_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = self->scatter_(dim, *index, *src);
+    auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_scatter_1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->scatter_(dim, *index, *value);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::split(*self, split_size, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }
-int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::scatter_add(*self, dim, *index, *src);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
    return 0;
   )
   return 1;
 }
-int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+int atg_sqrt(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->scatter_add_(dim, *index, *src);
+    auto outputs__ = torch::sqrt(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) {
+int atg_sqrt_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::select(*self, dim, index);
+    auto outputs__ = torch::sqrt_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_selu(tensor *out__, tensor self) {
+int atg_sqrt_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::selu(*self);
+    auto outputs__ = torch::sqrt_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_selu_(tensor *out__, tensor self) {
+int atg_square(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::selu_(*self);
+    auto outputs__ = torch::square(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_set_(tensor *out__, tensor self) {
+int atg_square_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->set_();
+    auto outputs__ = torch::square_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_set_1(tensor *out__, tensor self, tensor source) {
+int atg_square_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->set_(*source);
+    auto outputs__ = torch::square_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_set_requires_grad(tensor *out__, tensor self, int r) {
+int atg_squeeze(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->set_requires_grad((bool)r);
+    auto outputs__ = torch::squeeze(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sigmoid(tensor *out__, tensor self) {
+int atg_squeeze_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sigmoid(*self);
+    auto outputs__ = self->squeeze_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sigmoid_(tensor *out__, tensor self) {
+int atg_squeeze_dim(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::sigmoid_(*self);
+    auto outputs__ = torch::squeeze(*self, dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) {
+int atg_squeeze_dim_(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::sigmoid_backward(*grad_output, *output);
+    auto outputs__ = self->squeeze_(dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
+int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
   PROTECT(
-    auto outputs__ = torch::sigmoid_backward_out(*grad_input, *grad_output, *output);
+    auto outputs__ = torch::sspaddmm(*self, *mat1, *mat2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sigmoid_out(tensor *out__, tensor out, tensor self) {
+int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
   PROTECT(
-    auto outputs__ = torch::sigmoid_out(*out, *self);
+    auto outputs__ = torch::sspaddmm_out(*out, *self, *mat1, *mat2);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sign(tensor *out__, tensor self) {
+int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::sign(*self);
+    auto outputs__ = torch::stack(of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sign_(tensor *out__, tensor self) {
+int atg_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->sign_();
+    auto outputs__ = torch::stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sign_out(tensor *out__, tensor out, tensor self) {
+int atg_std(tensor *out__, tensor self, int unbiased) {
   PROTECT(
-    auto outputs__ = torch::sign_out(*out, *self);
+    auto outputs__ = torch::std(*self, (bool)unbiased);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sin(tensor *out__, tensor self) {
+int atg_std_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::sin(*self);
+    auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }
-int atg_sin_(tensor *out__, tensor self) {
+int atg_std_correction_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::sin_(*self);
+    auto outputs__ = torch::std_out(*out, *self, 
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sin_out(tensor *out__, tensor out, tensor self) {
+int atg_std_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::sin_out(*out, *self);
+    auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sinh(tensor *out__, tensor self) {
+int atg_std_mean(tensor *out__, tensor self, int unbiased) {
   PROTECT(
-    auto outputs__ = torch::sinh(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::std_mean(*self, (bool)unbiased);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_sinh_(tensor *out__, tensor self) {
+int atg_std_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::sinh_(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_sinh_out(tensor *out__, tensor out, tensor self) {
+int atg_std_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::sinh_out(*out, *self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step) {
+int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
   PROTECT(
-    auto outputs__ = torch::slice(*self, dim, start, end, step);
+    auto outputs__ = torch::std_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slogdet(tensor *out__, tensor self) {
+int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided, int return_complex) {
   PROTECT(
-    auto outputs__ = torch::slogdet(*self);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::stft(*self, n_fft, hop_length, win_length, (window ? *window : torch::Tensor()), (bool)normalized, (bool)onesided, (bool)return_complex);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
+int atg_sub(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = torch::sub(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
+int atg_sub_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
+    auto outputs__ = self->sub_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_dilated2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = torch::sub_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_sub_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_dilated3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = torch::sub(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_sub_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_transpose2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = self->sub_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_subtract(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_transpose2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = torch::subtract(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_subtract_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_transpose3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = self->subtract_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+int atg_subtract_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::slow_conv_transpose3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
+    auto outputs__ = torch::subtract_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_smm(tensor *out__, tensor self, tensor mat2) {
+int atg_subtract_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::smm(*self, *mat2);
+    auto outputs__ = torch::subtract(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_subtract_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction);
+    auto outputs__ = self->subtract_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_sum(tensor *out__, tensor self, int dtype) {
   PROTECT(
-    auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction);
+    auto outputs__ = torch::sum(*self, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_smooth_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_sum_dim_intlist(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
   PROTECT(
-    auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
+    auto outputs__ = torch::sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+int atg_sum_intlist_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
   PROTECT(
-    auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction);
+    auto outputs__ = torch::sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
-    auto outputs__ = torch::soft_margin_loss(*self, *target, reduction);
+    auto outputs__ = self->sum_to_size(torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_svd(tensor *out__, tensor self, int some, int compute_uv) {
   PROTECT(
-    auto outputs__ = torch::soft_margin_loss_backward(*grad_output, *self, *target, reduction);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::svd(*self, (bool)some, (bool)compute_uv);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_soft_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+int atg_svd_u(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv) {
   PROTECT(
-    auto outputs__ = torch::soft_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::svd_out(*U, *S, *V, *self, (bool)some, (bool)compute_uv);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+int atg_swapaxes(tensor *out__, tensor self, int64_t axis0, int64_t axis1) {
   PROTECT(
-    auto outputs__ = torch::soft_margin_loss_out(*out, *self, *target, reduction);
+    auto outputs__ = torch::swapaxes(*self, axis0, axis1);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
+int atg_swapaxes_(tensor *out__, tensor self, int64_t axis0, int64_t axis1) {
   PROTECT(
-    auto outputs__ = torch::softmax(*self, dim, torch::ScalarType(dtype));
+    auto outputs__ = self->swapaxes_(axis0, axis1);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softplus(tensor *out__, tensor self) {
+int atg_swapdims(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
-    auto outputs__ = torch::softplus(*self);
+    auto outputs__ = torch::swapdims(*self, dim0, dim1);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+int atg_swapdims_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
-    auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold, *output);
+    auto outputs__ = self->swapdims_(dim0, dim1);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softplus_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) {
   PROTECT(
-    auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold, *output);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::symeig(*self, (bool)eigenvectors, (bool)upper);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_softplus_out(tensor *out__, tensor out, tensor self) {
+int atg_symeig_e(tensor *out__, tensor e, tensor V, tensor self, int eigenvectors, int upper) {
   PROTECT(
-    auto outputs__ = torch::softplus_out(*out, *self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::symeig_out(*e, *V, *self, (bool)eigenvectors, (bool)upper);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_softshrink(tensor *out__, tensor self) {
+int atg_t(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::softshrink(*self);
+    auto outputs__ = torch::t(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scalar lambd) {
+int atg_t_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::softshrink_backward(*grad_output, *self, *lambd);
+    auto outputs__ = self->t_();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softshrink_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar lambd) {
+int atg_take(tensor *out__, tensor self, tensor index) {
   PROTECT(
-    auto outputs__ = torch::softshrink_backward_out(*grad_input, *grad_output, *self, *lambd);
+    auto outputs__ = torch::take(*self, *index);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_softshrink_out(tensor *out__, tensor out, tensor self) {
+int atg_take_along_dim(tensor *out__, tensor self, tensor indices, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::softshrink_out(*out, *self);
+    auto outputs__ = torch::take_along_dim(*self, *indices, dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_solve(tensor *out__, tensor self, tensor A) {
+int atg_take_along_dim_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::solve(*self, *A);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::take_along_dim_out(*out, *self, *indices, dim);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_solve_out(tensor *out__, tensor solution, tensor lu, tensor self, tensor A) {
+int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) {
   PROTECT(
-    auto outputs__ = torch::solve_out(*solution, *lu, *self, *A);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::take_out(*out, *self, *index);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) {
+int atg_tan(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sort(*self, dim, (bool)descending);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::tan(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sort_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int descending) {
+int atg_tan_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sort_out(*values, *indices, *self, dim, (bool)descending);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::tan_(*self);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_tan_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sparse_coo_tensor(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::tan_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_coo_tensor1(tensor *out__, tensor indices, tensor values, int options_kind, int options_device) {
+int atg_tanh(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sparse_coo_tensor(*indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::tanh(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_coo_tensor2(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_tanh_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sparse_coo_tensor(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::tanh_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_mask(tensor *out__, tensor self, tensor mask) {
+int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) {
   PROTECT(
-    auto outputs__ = self->sparse_mask(*mask);
+    auto outputs__ = torch::tanh_backward(*grad_output, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
+int atg_tanh_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
   PROTECT(
-    auto outputs__ = self->sparse_resize_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
+    auto outputs__ = torch::tanh_backward_out(*grad_input, *grad_output, *output);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
+int atg_tanh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->sparse_resize_and_clear_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
+    auto outputs__ = torch::tanh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
+int atg_tensor_split(tensor *out__, tensor self, int64_t sections, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::split(*self, split_size, dim);
+    auto outputs__ = torch::tensor_split(*self, sections, dim);
     int sz = outputs__.size();
     // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
     for (int i = 0; i < sz; ++i)
@@ -7617,9 +15395,9 @@ int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
   return 1;
 }

-int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) {
+int atg_tensor_split_indices(tensor *out__, tensor self, int64_t *indices_data, int indices_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim);
+    auto outputs__ = torch::tensor_split(*self, torch::IntArrayRef(indices_data, indices_len), dim);
     int sz = outputs__.size();
     // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
     for (int i = 0; i < sz; ++i)
@@ -7631,1208 +15409,1237 @@ int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data,
   return 1;
 }

-int atg_sqrt(tensor *out__, tensor self) {
+int atg_tensor_split_tensor_indices_or_sections(tensor *out__, tensor self, tensor tensor_indices_or_sections, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::sqrt(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::tensor_split(*self, *tensor_indices_or_sections, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
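+    // the caller must size out__ for sz + 1 pointers; the trailing nullptr terminates the returned tensor list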
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_sqrt_(tensor *out__, tensor self) {
+int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) {
   PROTECT(
-    auto outputs__ = torch::sqrt_(*self);
+    auto outputs__ = torch::tensordot(*self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sqrt_out(tensor *out__, tensor out, tensor self) {
+int atg_tensordot_out(tensor *out__, tensor out, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) {
   PROTECT(
-    auto outputs__ = torch::sqrt_out(*out, *self);
+    auto outputs__ = torch::tensordot_out(*out, *self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_squeeze(tensor *out__, tensor self) {
+int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) {
   PROTECT(
-    auto outputs__ = torch::squeeze(*self);
+    auto outputs__ = torch::threshold(*self, *threshold, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_squeeze1(tensor *out__, tensor self, int64_t dim) {
+int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) {
   PROTECT(
-    auto outputs__ = torch::squeeze(*self, dim);
+    auto outputs__ = torch::threshold_(*self, *threshold, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_squeeze_(tensor *out__, tensor self) {
+int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scalar threshold) {
   PROTECT(
-    auto outputs__ = self->squeeze_();
+    auto outputs__ = torch::threshold_backward(*grad_output, *self, *threshold);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_squeeze_1(tensor *out__, tensor self, int64_t dim) {
+int atg_threshold_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar threshold) {
   PROTECT(
-    auto outputs__ = self->squeeze_(dim);
+    auto outputs__ = torch::threshold_backward_out(*grad_input, *grad_output, *self, *threshold);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
+int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, scalar value) {
   PROTECT(
-    auto outputs__ = torch::sspaddmm(*self, *mat1, *mat2);
+    auto outputs__ = torch::threshold_out(*out, *self, *threshold, *value);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
+int atg_tile(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
   PROTECT(
-    auto outputs__ = torch::sspaddmm_out(*out, *self, *mat1, *mat2);
+    auto outputs__ = torch::tile(*self, torch::IntArrayRef(dims_data, dims_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
+int atg_to(tensor *out__, tensor self, int device) {
   PROTECT(
-    auto outputs__ = torch::stack(of_carray_tensor(tensors_data, tensors_len), dim);
+    auto outputs__ = self->to(device_of_int(device));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
+int atg_to_dense(tensor *out__, tensor self, int dtype) {
   PROTECT(
-    auto outputs__ = torch::stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
+    auto outputs__ = self->to_dense(torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_std(tensor *out__, tensor self, int unbiased) {
+int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) {
   PROTECT(
-    auto outputs__ = torch::std(*self, (bool)unbiased);
+    auto outputs__ = torch::to_dense_backward(*grad, *input);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_std1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
+int atg_to_device(tensor *out__, tensor self, int device, int dtype, int non_blocking, int copy) {
   PROTECT(
-    auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
+    auto outputs__ = self->to(device_of_int(device), torch::ScalarType(dtype), (bool)non_blocking, (bool)copy);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_std_mean(tensor *out__, tensor self, int unbiased) {
+int atg_to_dtype(tensor *out__, tensor self, int dtype, int non_blocking, int copy) {
   PROTECT(
-    auto outputs__ = torch::std_mean(*self, (bool)unbiased);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = self->to(torch::ScalarType(dtype), (bool)non_blocking, (bool)copy);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_std_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
+int atg_to_dtype_layout(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking, int copy) {
   PROTECT(
-    auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = self->to(at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking, (bool)copy);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
+int atg_to_mkldnn(tensor *out__, tensor self, int dtype) {
   PROTECT(
-    auto outputs__ = torch::std_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
+    auto outputs__ = self->to_mkldnn(torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided) {
+int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) {
   PROTECT(
-    auto outputs__ = torch::stft(*self, n_fft, hop_length, win_length, (window ? *window : torch::Tensor()), (bool)normalized, (bool)onesided);
+    auto outputs__ = torch::to_mkldnn_backward(*grad, *input);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sub(tensor *out__, tensor self, tensor other) {
+int atg_to_other(tensor *out__, tensor self, tensor other, int non_blocking, int copy) {
   PROTECT(
-    auto outputs__ = torch::sub(*self, *other);
+    auto outputs__ = self->to(*other, (bool)non_blocking, (bool)copy);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sub1(tensor *out__, tensor self, scalar other) {
+int atg_to_sparse(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sub(*self, *other);
+    auto outputs__ = self->to_sparse();
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sub_(tensor *out__, tensor self, tensor other) {
+int atg_to_sparse_sparse_dim(tensor *out__, tensor self, int64_t sparse_dim) {
   PROTECT(
-    auto outputs__ = self->sub_(*other);
+    auto outputs__ = self->to_sparse(sparse_dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sub_1(tensor *out__, tensor self, scalar other) {
+int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, int sorted) {
   PROTECT(
-    auto outputs__ = self->sub_(*other);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::topk(*self, k, dim, (bool)largest, (bool)sorted);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) {
+int atg_topk_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted) {
   PROTECT(
-    auto outputs__ = torch::sub_out(*out, *self, *other);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::topk_out(*values, *indices, *self, k, dim, (bool)largest, (bool)sorted);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_sum(tensor *out__, tensor self, int dtype) {
+int atg_totype(tensor *out__, tensor self, int scalar_type) {
   PROTECT(
-    auto outputs__ = torch::sum(*self, torch::ScalarType(dtype));
+    auto outputs__ = self->toType(torch::ScalarType(scalar_type));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sum1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+int atg_trace(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = torch::trace(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+int atg_trace_backward(tensor *out__, tensor grad, int64_t *sizes_data, int sizes_len) {
   PROTECT(
-    auto outputs__ = torch::sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
+    auto outputs__ = torch::trace_backward(*grad, torch::IntArrayRef(sizes_data, sizes_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len) {
+int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
-    auto outputs__ = self->sum_to_size(torch::IntArrayRef(size_data, size_len));
+    auto outputs__ = torch::transpose(*self, dim0, dim1);
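+    // transpose returns a view that shares storage with self; no data is copied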
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_svd(tensor *out__, tensor self, int some, int compute_uv) {
+int atg_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
-    auto outputs__ = torch::svd(*self, (bool)some, (bool)compute_uv);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    auto outputs__ = self->transpose_(dim0, dim1);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_svd_out(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv) {
+int atg_trapezoid(tensor *out__, tensor y, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::svd_out(*U, *S, *V, *self, (bool)some, (bool)compute_uv);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    out__[2] = new torch::Tensor(std::get<2>(outputs__));
+    auto outputs__ = torch::trapezoid(*y, dim);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) {
+int atg_trapezoid_x(tensor *out__, tensor y, tensor x, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::symeig(*self, (bool)eigenvectors, (bool)upper);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::trapezoid(*y, *x, dim);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_symeig_out(tensor *out__, tensor e, tensor V, tensor self, int eigenvectors, int upper) {
+int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::symeig_out(*e, *V, *self, (bool)eigenvectors, (bool)upper);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::trapz(*y, *x, dim);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_t(tensor *out__, tensor self) {
+int atg_trapz_dx(tensor *out__, tensor y, double dx, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::t(*self);
+    auto outputs__ = torch::trapz(*y, dx, dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_t_(tensor *out__, tensor self) {
+int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) {
   PROTECT(
-    auto outputs__ = self->t_();
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::triangular_solve(*self, *A, (bool)upper, (bool)transpose, (bool)unitriangular);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_take(tensor *out__, tensor self, tensor index) {
+int atg_triangular_solve_x(tensor *out__, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular) {
   PROTECT(
-    auto outputs__ = torch::take(*self, *index);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::triangular_solve_out(*X, *M, *self, *A, (bool)upper, (bool)transpose, (bool)unitriangular);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) {
+int atg_tril(tensor *out__, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::take_out(*out, *self, *index);
+    auto outputs__ = torch::tril(*self, diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tan(tensor *out__, tensor self) {
+int atg_tril_(tensor *out__, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::tan(*self);
+    auto outputs__ = self->tril_(diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tan_(tensor *out__, tensor self) {
+int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::tan_(*self);
+    auto outputs__ = torch::tril_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tan_out(tensor *out__, tensor out, tensor self) {
+int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::tan_out(*out, *self);
+    auto outputs__ = torch::tril_out(*out, *self, diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tanh(tensor *out__, tensor self) {
+int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction) {
   PROTECT(
-    auto outputs__ = torch::tanh(*self);
+    auto outputs__ = torch::triplet_margin_loss(*anchor, *positive, *negative, margin, p, eps, (bool)swap, reduction);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tanh_(tensor *out__, tensor self) {
+int atg_triu(tensor *out__, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::tanh_(*self);
+    auto outputs__ = torch::triu(*self, diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) {
+int atg_triu_(tensor *out__, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::tanh_backward(*grad_output, *output);
+    auto outputs__ = self->triu_(diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
+int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::tanh_backward_out(*grad_input, *grad_output, *output);
+    auto outputs__ = torch::triu_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tanh_out(tensor *out__, tensor out, tensor self) {
+int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) {
   PROTECT(
-    auto outputs__ = torch::tanh_out(*out, *self);
+    auto outputs__ = torch::triu_out(*out, *self, diagonal);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) {
+int atg_true_divide(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::tensordot(*self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len));
+    auto outputs__ = torch::true_divide(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) {
+int atg_true_divide_(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::threshold(*self, *threshold, *value);
+    auto outputs__ = self->true_divide_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) {
+int atg_true_divide_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::threshold_(*self, *threshold, *value);
+    auto outputs__ = torch::true_divide_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scalar threshold) {
+int atg_true_divide_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::threshold_backward(*grad_output, *self, *threshold);
+    auto outputs__ = torch::true_divide(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, scalar value) {
+int atg_true_divide_scalar_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::threshold_out(*out, *self, *threshold, *value);
+    auto outputs__ = self->true_divide_(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to(tensor *out__, tensor self, int device) {
+int atg_trunc(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->to(device_of_int(device));
-    // auto t = new torch::Tensor(outputs__);
+    auto outputs__ = torch::trunc(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to1(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking, int copy) {
+int atg_trunc_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = self->to(at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking, (bool)copy);
+    auto outputs__ = torch::trunc_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to2(tensor *out__, tensor self, int dtype, int non_blocking, int copy) {
+int atg_trunc_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
-    auto outputs__ = self->to(torch::ScalarType(dtype), (bool)non_blocking, (bool)copy);
+    auto outputs__ = torch::trunc_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to3(tensor *out__, tensor self, tensor other, int non_blocking, int copy) {
+int atg_type_as(tensor *out__, tensor self, tensor other) {
   PROTECT(
-    auto outputs__ = self->to(*other, (bool)non_blocking, (bool)copy);
+    auto outputs__ = self->type_as(*other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to4(tensor *out__, tensor self, int device, int dtype, int non_blocking, int copy) {
+int atg_unbind(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->to(device_of_int(device), torch::ScalarType(dtype), (bool)non_blocking, (bool)copy);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unbind(*self, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_to_dense(tensor *out__, tensor self) {
+int atg_unflatten(tensor *out__, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len) {
   PROTECT(
-    auto outputs__ = self->to_dense();
+    auto outputs__ = self->unflatten(dim, torch::IntArrayRef(sizes_data, sizes_len));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) {
+int atg_unflatten_dense_tensors(tensor *out__, tensor flat, tensor *tensors_data, int tensors_len) {
   PROTECT(
-    auto outputs__ = torch::to_dense_backward(*grad, *input);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unflatten_dense_tensors(*flat, of_carray_tensor(tensors_data, tensors_len));
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_to_mkldnn(tensor *out__, tensor self) {
+int atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) {
   PROTECT(
-    auto outputs__ = self->to_mkldnn();
+    auto outputs__ = self->unfold(dimension, size, step);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) {
+int atg_unfold_backward(tensor *out__, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step) {
   PROTECT(
-    auto outputs__ = torch::to_mkldnn_backward(*grad, *input);
+    auto outputs__ = torch::unfold_backward(*grad_in, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, size, step);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to_sparse(tensor *out__, tensor self) {
+int atg_uniform_(tensor *out__, tensor self, double from, double to) {
   PROTECT(
-    auto outputs__ = self->to_sparse();
+    auto outputs__ = self->uniform_(from, to);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_to_sparse1(tensor *out__, tensor self, int64_t sparse_dim) {
+int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->to_sparse(sparse_dim);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unique_consecutive(*self, (bool)return_inverse, (bool)return_counts, dim);
+    out__[0] = new torch::Tensor(std::get<0>(outputs__));
+    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
    return 0;
   )
   return 1;
 }

-int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, int sorted) {
+int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts) {
   PROTECT(
-    auto outputs__ = torch::topk(*self, k, dim, (bool)largest, (bool)sorted);
+    auto outputs__ = torch::unique_dim(*self, dim, (bool)sorted, (bool)return_inverse, (bool)return_counts);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_topk_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted) {
+int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int return_inverse, int return_counts) {
   PROTECT(
-    auto outputs__ = torch::topk_out(*values, *indices, *self, k, dim, (bool)largest, (bool)sorted);
+    auto outputs__ = torch::unique_dim_consecutive(*self, dim, (bool)return_inverse, (bool)return_counts);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    out__[2] = new torch::Tensor(std::get<2>(outputs__));
     return 0;
   )
   return 1;
 }

-int atg_totype(tensor *out__, tensor self, int scalar_type) {
+int atg_unsafe_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->toType(torch::ScalarType(scalar_type));
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unsafe_chunk(*self, chunks, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_trace(tensor *out__, tensor self) {
+int atg_unsafe_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::trace(*self);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unsafe_split(*self, split_size, dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
+int atg_unsafe_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::transpose(*self, dim0, dim1);
-    out__[0] = new torch::Tensor(outputs__);
+    auto outputs__ = torch::unsafe_split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim);
+    int sz = outputs__.size();
+    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
+    for (int i = 0; i < sz; ++i)
+      out__[i] = new torch::Tensor(outputs__[i]);
+    out__[sz] = nullptr;
+    // return out__;
     return 0;
   )
   return 1;
 }

-int atg_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
+int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
-    auto outputs__ = self->transpose_(dim0, dim1);
+    auto outputs__ = torch::unsqueeze(*self, dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) {
+int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
-    auto outputs__ = torch::trapz(*y, *x, dim);
+    auto outputs__ = self->unsqueeze_(dim);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_trapz1(tensor *out__, tensor y, double dx, int64_t dim) {
+int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::trapz(*y, dx, dim);
+    auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) {
+int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::triangular_solve(*self, *A, (bool)upper, (bool)transpose, (bool)unitriangular);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triangular_solve_out(tensor *out__, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular) {
+int atg_upsample_bicubic2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::triangular_solve_out(*X, *M, *self, *A, (bool)upper, (bool)transpose, (bool)unitriangular);
-    out__[0] = new torch::Tensor(std::get<0>(outputs__));
-    out__[1] = new torch::Tensor(std::get<1>(outputs__));
+    auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w);
+    out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tril(tensor *out__, tensor self, int64_t diagonal) {
+int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::tril(*self, diagonal);
+    auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tril_(tensor *out__, tensor self, int64_t diagonal) {
+int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = self->tril_(diagonal);
+    auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) {
+int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::tril_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) {
+int atg_upsample_bilinear2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::tril_out(*out, *self, diagonal);
+    auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction) {
+int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) {
   PROTECT(
-    auto outputs__ = torch::triplet_margin_loss(*anchor, *positive, *negative, margin, p, eps, (bool)swap, reduction);
+    auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triu(tensor *out__, tensor self, int64_t diagonal) {
+int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) {
   PROTECT(
-    auto outputs__ = torch::triu(*self, diagonal);
+    auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triu_(tensor *out__, tensor self, int64_t diagonal) {
+int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) {
   PROTECT(
-    auto outputs__ = self->triu_(diagonal);
+    auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) {
+int atg_upsample_linear1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) {
   PROTECT(
-    auto outputs__ = torch::triu_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) {
+int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) {
   PROTECT(
-    auto outputs__ = torch::triu_out(*out, *self, diagonal);
+    auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_trunc(tensor *out__, tensor self) {
+int atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales) {
   PROTECT(
-    auto outputs__ = torch::trunc(*self);
+    auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales);
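+    // the plain double scales is implicitly converted to the C++ API's c10::optional<double> parameter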
torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_trunc_(tensor *out__, tensor self) { +int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) { PROTECT( - auto outputs__ = torch::trunc_(*self); + auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_trunc_out(tensor *out__, tensor out, tensor self) { +int atg_upsample_nearest1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) { PROTECT( - auto outputs__ = torch::trunc_out(*out, *self); + auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_type_as(tensor *out__, tensor self, tensor other) { +int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales) { PROTECT( - auto outputs__ = self->type_as(*other); + auto outputs__ = torch::upsample_nearest1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unbind(tensor *out__, tensor self, int64_t dim) { +int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::unbind(*self, dim); - int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); - for (int i = 0; i < sz; ++i) - out__[i] = new torch::Tensor(outputs__[i]); - out__[sz] = nullptr; - // return out__; + auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) { +int atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) { PROTECT( - auto outputs__ = self->unfold(dimension, size, step); + auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_uniform_(tensor *out__, tensor self, double from, double to) { +int atg_upsample_nearest2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) { PROTECT( - auto outputs__ = self->uniform_(from, to); + auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), 
torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) { +int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::unique_consecutive(*self, (bool)return_inverse, (bool)return_counts, dim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts) { +int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::unique_dim(*self, dim, (bool)sorted, (bool)return_inverse, (bool)return_counts); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int return_inverse, int return_counts) { +int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::unique_dim_consecutive(*self, dim, (bool)return_inverse, (bool)return_counts); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); + auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { +int atg_upsample_nearest3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::unsqueeze(*self, dim); + auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { +int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = self->unsqueeze_(dim); + auto outputs__ = torch::upsample_nearest3d_out(*out, *self, 
torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_upsample_trilinear3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t 
*output_size_data, int output_size_len, int align_corners) { +int atg_value_selecting_reduction_backward(tensor *out__, tensor grad, int64_t dim, tensor indices, int64_t *sizes_data, int sizes_len, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::value_selecting_reduction_backward(*grad, dim, *indices, torch::IntArrayRef(sizes_data, sizes_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_values(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = self->values(); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_vander(tensor *out__, tensor x, int64_t n, int increasing) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::vander(*x, n, (bool)increasing); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_var(tensor *out__, tensor self, int unbiased) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::var(*self, (bool)unbiased); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_var_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_var_correction_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t 
*output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_var_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_var_mean(tensor *out__, tensor self, int unbiased) { PROTECT( - auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::var_mean(*self, (bool)unbiased); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_var_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_var_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); return 0; ) return 1; } -int atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); + auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_vdot(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_out(*out, *self, 
torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::vdot(*self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_vdot_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::vdot_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); + auto outputs__ = self->view(torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_view_as(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); + auto outputs__ = self->view_as(*other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_view_as_complex(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::view_as_complex(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_view_as_real(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); + auto outputs__ = torch::view_as_real(*self); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_view_dtype(tensor *out__, tensor self, int dtype) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); + auto outputs__ = self->view(torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +int atg_vsplit(tensor *out__, tensor self, int64_t sections) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); - out__[0] = new 
torch::Tensor(outputs__); + auto outputs__ = torch::vsplit(*self, sections); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; return 0; ) return 1; } -int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +int atg_vsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::vsplit(*self, torch::IntArrayRef(indices_data, indices_len)); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; return 0; ) return 1; } -int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_vstack(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::vstack(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_vstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); + auto outputs__ = torch::vstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +int atg_where(tensor *out__, tensor condition) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); - out__[0] = new torch::Tensor(outputs__); + auto outputs__ = torch::where(*condition); + int sz = outputs__.size(); + // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + // return out__; return 0; ) return 1; } -int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +int atg_where_scalar(tensor *out__, tensor condition, scalar self, scalar other) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); + auto outputs__ = torch::where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int 
atg_values(tensor *out__, tensor self) { +int atg_where_scalarother(tensor *out__, tensor condition, tensor self, scalar other) { PROTECT( - auto outputs__ = self->values(); + auto outputs__ = torch::where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_var(tensor *out__, tensor self, int unbiased) { +int atg_where_scalarself(tensor *out__, tensor condition, scalar self, tensor other) { PROTECT( - auto outputs__ = torch::var(*self, (bool)unbiased); + auto outputs__ = torch::where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_var1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +int atg_where_self(tensor *out__, tensor condition, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); + auto outputs__ = torch::where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_var_mean(tensor *out__, tensor self, int unbiased) { +int atg_xlogy(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::var_mean(*self, (bool)unbiased); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::xlogy(*self, *other); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_var_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +int atg_xlogy_(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); + auto outputs__ = torch::xlogy_(*self, *other); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +int atg_xlogy_outscalar_other(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( - auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); + auto outputs__ = torch::xlogy_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { +int atg_xlogy_outscalar_self(tensor *out__, tensor out, scalar self, tensor other) { PROTECT( - auto outputs__ = self->view(torch::IntArrayRef(size_data, size_len)); + auto outputs__ = torch::xlogy_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_view_as(tensor *out__, tensor self, tensor other) { +int atg_xlogy_outtensor(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( - auto outputs__ = self->view_as(*other); + auto outputs__ = torch::xlogy_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } -int atg_where(tensor *out__, tensor condition) { +int atg_xlogy_scalar_other(tensor *out__, tensor self, scalar other) { PROTECT( - auto outputs__ = torch::where(*condition); - int sz = outputs__.size(); - for (int i = 0; i < sz; ++i) - out__[i] = new torch::Tensor(outputs__[i]); - out__[sz] = nullptr; + auto outputs__ = torch::xlogy(*self, *other); + out__[0] = new torch::Tensor(outputs__); return 0; ) return 1; } 

-int atg_where1(tensor *out__, tensor condition, tensor self, tensor other) {
+int atg_xlogy_scalar_other_(tensor *out__, tensor self, scalar other) {
   PROTECT(
-    auto outputs__ = torch::where(*condition, *self, *other);
+    auto outputs__ = torch::xlogy_(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_zero_(tensor *out__, tensor self) {
+int atg_xlogy_scalar_self(tensor *out__, scalar self, tensor other) {
   PROTECT(
-    auto outputs__ = torch::zero_(*self);
+    auto outputs__ = torch::xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+int atg_zero_(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::zero_(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_zeros_like(tensor *out__, tensor self) {
+int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
-    auto outputs__ = torch::zeros_like(*self);
+    auto outputs__ = torch::zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
   return 1;
 }

-int atg_zeros_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+int atg_zeros_like(tensor *out__, tensor self) {
   PROTECT(
-    auto outputs__ = torch::zeros_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
+    auto outputs__ = torch::zeros_like(*self);
     out__[0] = new torch::Tensor(outputs__);
     return 0;
   )
diff --git a/deps/c_wrapper/torch_api_generated.h b/deps/c_wrapper/torch_api_generated.h
index d2f4e24a..63657b70 100644
--- a/deps/c_wrapper/torch_api_generated.h
+++ b/deps/c_wrapper/torch_api_generated.h
@@ -1,32 +1,234 @@
 // THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
+int atg___and__(tensor *, tensor self, scalar other); +int atg___and__tensor_(tensor *, tensor self, tensor other); +int atg___iand__(tensor *, tensor self, scalar other); +int atg___iand__tensor_(tensor *, tensor self, tensor other); +int atg___ilshift__(tensor *, tensor self, scalar other); +int atg___ilshift__tensor_(tensor *, tensor self, tensor other); +int atg___ior__(tensor *, tensor self, scalar other); +int atg___ior__tensor_(tensor *, tensor self, tensor other); +int atg___irshift__(tensor *, tensor self, scalar other); +int atg___irshift__tensor_(tensor *, tensor self, tensor other); +int atg___ixor__(tensor *, tensor self, scalar other); +int atg___ixor__tensor_(tensor *, tensor self, tensor other); +int atg___lshift__(tensor *, tensor self, scalar other); +int atg___lshift__tensor_(tensor *, tensor self, tensor other); +int atg___or__(tensor *, tensor self, scalar other); +int atg___or__tensor_(tensor *, tensor self, tensor other); +int atg___rshift__(tensor *, tensor self, scalar other); +int atg___rshift__tensor_(tensor *, tensor self, tensor other); +int atg___xor__(tensor *, tensor self, scalar other); +int atg___xor__tensor_(tensor *, tensor self, tensor other); +int atg__adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +int atg__adaptive_avg_pool2d_backward(tensor *, tensor grad_output, tensor self); +int atg__adaptive_avg_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +int atg__adaptive_avg_pool3d_backward(tensor *, tensor grad_output, tensor self); +int atg__add_batch_dim(tensor *, tensor self, int64_t batch_dim, int64_t level); +int atg__add_relu(tensor *, tensor self, tensor other); +int atg__add_relu_(tensor *, tensor self, tensor other); +int atg__add_relu_out(tensor *, tensor out, tensor self, tensor other); +int atg__add_relu_scalar(tensor *, tensor self, scalar other); +int atg__add_relu_scalar_(tensor *, tensor self, scalar other); +int atg__aminmax(tensor *, tensor self); +int atg__aminmax_dim(tensor *, tensor self, int64_t dim, int keepdim); +int atg__amp_update_scale_(tensor *, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); +int atg__baddbmm_mkl_(tensor *, tensor self, tensor batch1, tensor batch2); +int atg__cast_byte(tensor *, tensor self, int non_blocking); +int atg__cast_char(tensor *, tensor self, int non_blocking); +int atg__cast_double(tensor *, tensor self, int non_blocking); +int atg__cast_float(tensor *, tensor self, int non_blocking); +int atg__cast_half(tensor *, tensor self, int non_blocking); +int atg__cast_int(tensor *, tensor self, int non_blocking); +int atg__cast_long(tensor *, tensor self, int non_blocking); +int atg__cast_short(tensor *, tensor self, int non_blocking); +int atg__cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); +int atg__cat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); +int atg__cdist_backward(tensor *, tensor grad, tensor x1, tensor x2, double p, tensor cdist); +int atg__cholesky_solve_helper(tensor *, tensor self, tensor A, int upper); +int atg__coalesce(tensor *, tensor self); +int atg__coalesced_(tensor *, tensor self, int coalesced); +int atg__compute_linear_combination(tensor *, tensor input, tensor coefficients); +int atg__compute_linear_combination_out(tensor *, tensor out, tensor input, tensor coefficients); +int atg__conj(tensor *, tensor self); +int atg__conj_physical(tensor *, tensor self); +int 
atg__conv_depthwise2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len); +int atg__conv_depthwise2d_backward(tensor *, tensor grad_input, tensor grad_weight, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len); +int atg__conv_depthwise2d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len); +int atg__convert_indices_from_coo_to_csr(tensor *, tensor self, int64_t size, int out_int32); +int atg__convert_indices_from_coo_to_csr_out(tensor *, tensor out, tensor self, int64_t size, int out_int32); +int atg__convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32); +int atg__convolution_deprecated(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled); +int atg__convolution_mode(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups); +int atg__convolution_nogroup(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len); +int atg__copy_from(tensor *, tensor self, tensor dst, int non_blocking); +int atg__copy_from_and_resize(tensor *, tensor self, tensor dst); +int atg__ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int zero_infinity); +int atg__ctc_loss_backward(tensor *, tensor grad, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, tensor neg_log_likelihood, tensor log_alpha, int64_t blank, int zero_infinity); +int atg__cudnn_ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int deterministic, int zero_infinity); +int atg__cudnn_init_dropout_state(tensor *, double dropout, int train, int64_t dropout_seed, int options_kind, int options_device); +int atg__cudnn_rnn(tensor *, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor weight_buf, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state); +int atg__cudnn_rnn_flatten_weight(tensor *, 
tensor *weight_arr_data, int weight_arr_len, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, int bidirectional); +int atg__det_lu_based_helper(tensor *, tensor self); +int atg__det_lu_based_helper_backward_helper(tensor *, tensor det_grad, tensor det, tensor self, tensor lu, tensor pivs); +int atg__dim_arange(tensor *, tensor like, int64_t dim); +int atg__dirichlet_grad(tensor *, tensor x, tensor alpha, tensor total); +int atg__embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx); +int atg__embedding_bag_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int64_t padding_idx); +int atg__embedding_bag_dense_backward(tensor *, tensor grad, tensor indices, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx); +int atg__embedding_bag_forward_only(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx); +int atg__embedding_bag_per_sample_weights_backward(tensor *, tensor grad, tensor weight, tensor indices, tensor offsets, tensor offset2bag, int64_t mode, int64_t padding_idx); +int atg__embedding_bag_sparse_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx); +int atg__empty_affine_quantized(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device, double scale, int64_t zero_point); +int atg__empty_per_channel_affine_quantized(tensor *, int64_t *size_data, int size_len, tensor scales, tensor zero_points, int64_t axis, int options_kind, int options_device); +int atg__euclidean_dist(tensor *, tensor x1, tensor x2); +int atg__fake_quantize_learnable_per_channel_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); +int atg__fake_quantize_learnable_per_channel_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); +int atg__fake_quantize_learnable_per_tensor_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); +int atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); +int atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(tensor *, tensor self, tensor scale, tensor zero_point, tensor fake_quant_enabled, int64_t quant_min, int64_t quant_max); +int atg__fft_c2c(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward); +int atg__fft_c2c_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward); +int atg__fft_c2r(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size); +int 
atg__fft_c2r_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size); +int atg__fft_r2c(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided); +int atg__fft_r2c_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided); +int atg__fused_dropout(tensor *, tensor self, double p); +int atg__fused_moving_avg_obs_fq_helper(tensor *, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant); +int atg__fw_primal(tensor *, tensor self, int64_t level); +int atg__gather_sparse_backward(tensor *, tensor self, int64_t dim, tensor index, tensor grad); +int atg__grid_sampler_2d_cpu_fallback(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +int atg__grid_sampler_2d_cpu_fallback_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +int atg__index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); +int atg__index_put_impl_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate, int unsafe); +int atg__indices(tensor *, tensor self); +int atg__inverse_helper(tensor *, tensor self); +int atg__linalg_inv_out_helper_(tensor *, tensor self, tensor infos_lu, tensor infos_getri); +int atg__linalg_qr_helper(tensor *, tensor self, char * mode); +int atg__log_softmax(tensor *, tensor self, int64_t dim, int half_to_float); +int atg__log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__log_softmax_backward_data_out(tensor *, tensor out, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__log_softmax_out(tensor *, tensor out, tensor self, int64_t dim, int half_to_float); +int atg__logcumsumexp(tensor *, tensor self, int64_t dim); +int atg__logcumsumexp_out(tensor *, tensor out, tensor self, int64_t dim); +int atg__lu_with_info(tensor *, tensor self, int pivot, int check_errors); +int atg__make_dual(tensor *, tensor primal, tensor tangent, int64_t level); +int atg__make_per_channel_quantized_tensor(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis); +int atg__make_per_tensor_quantized_tensor(tensor *, tensor self, double scale, int64_t zero_point); +int atg__masked_scale(tensor *, tensor self, tensor mask, double scale); +int atg__mkldnn_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len); +int atg__mkldnn_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1); +int atg__mkldnn_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1); +int atg__neg_view(tensor *, tensor self); +int atg__nnpack_spatial_convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +int atg__nnpack_spatial_convolution_backward_input(tensor *, tensor input, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len); +int atg__nnpack_spatial_convolution_backward_weight(tensor *, tensor input, int64_t *weightsize_data, int weightsize_len, tensor grad_output, int64_t *padding_data, int padding_len); +int atg__pack_padded_sequence(tensor *, tensor input, tensor lengths, int batch_first); +int 
atg__pack_padded_sequence_backward(tensor *, tensor grad, int64_t *input_size_data, int input_size_len, tensor batch_sizes, int batch_first); +int atg__pad_packed_sequence(tensor *, tensor data, tensor batch_sizes, int batch_first, scalar padding_value, int64_t total_length); +int atg__pdist_backward(tensor *, tensor grad, tensor self, double p, tensor pdist); +int atg__pin_memory(tensor *, tensor self, int device); +int atg__remove_batch_dim(tensor *, tensor self, int64_t level, int64_t batch_size, int64_t out_dim); +int atg__reshape_alias(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len); +int atg__reshape_from_tensor(tensor *, tensor self, tensor shape); +int atg__rowwise_prune(tensor *, tensor weight, tensor mask, int compressed_indices_dtype); +int atg__s_where(tensor *, tensor condition, tensor self, tensor other); +int atg__sample_dirichlet(tensor *, tensor self); +int atg__saturate_weight_to_fp16(tensor *, tensor weight); +int atg__segment_reduce_backward(tensor *, tensor grad, tensor output, tensor data, char * reduce, tensor lengths, int64_t axis); +int atg__shape_as_tensor(tensor *, tensor self); +int atg__slow_conv2d_backward(tensor *, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, tensor finput); +int atg__sobol_engine_draw(tensor *, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype); +int atg__sobol_engine_ff_(tensor *, tensor self, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated); +int atg__sobol_engine_initialize_state_(tensor *, tensor self, int64_t dimension); +int atg__sobol_engine_scramble_(tensor *, tensor self, tensor ltm, int64_t dimension); +int atg__softmax(tensor *, tensor self, int64_t dim, int half_to_float); +int atg__softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__softmax_backward_data_out(tensor *, tensor grad_input, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__softmax_out(tensor *, tensor out, tensor self, int64_t dim, int half_to_float); +int atg__solve_helper(tensor *, tensor self, tensor A); +int atg__sparse_addmm(tensor *, tensor self, tensor sparse, tensor dense); +int atg__sparse_coo_tensor_unsafe(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device); +int atg__sparse_coo_tensor_with_dims(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, int options_kind, int options_device); +int atg__sparse_coo_tensor_with_dims_and_tensors(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, tensor indices, tensor values, int options_kind, int options_device); +int atg__sparse_csr_tensor_unsafe(tensor *, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device); +int atg__sparse_log_softmax(tensor *, tensor self, int64_t dim, int half_to_float); +int atg__sparse_log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__sparse_log_softmax_int(tensor *, tensor self, int64_t dim, int dtype); +int atg__sparse_mask_helper(tensor *, tensor t, tensor mask_indices); +int atg__sparse_mm(tensor *, tensor sparse, tensor dense); +int atg__sparse_softmax(tensor *, 
tensor self, int64_t dim, int half_to_float); +int atg__sparse_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); +int atg__sparse_softmax_int(tensor *, tensor self, int64_t dim, int dtype); +int atg__sparse_sparse_matmul(tensor *, tensor self, tensor other); +int atg__sparse_sum(tensor *, tensor self); +int atg__sparse_sum_backward(tensor *, tensor grad, tensor self, int64_t *dim_data, int dim_len); +int atg__sparse_sum_dim(tensor *, tensor self, int64_t *dim_data, int dim_len); +int atg__sparse_sum_dim_dtype(tensor *, tensor self, int64_t *dim_data, int dim_len, int dtype); +int atg__sparse_sum_dtype(tensor *, tensor self, int dtype); +int atg__stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); +int atg__stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); +int atg__standard_gamma(tensor *, tensor self); +int atg__standard_gamma_grad(tensor *, tensor self, tensor output); +int atg__svd_helper(tensor *, tensor self, int some, int compute_uv); +int atg__symeig_helper(tensor *, tensor self, int eigenvectors, int upper); +int atg__test_ambiguous_defaults(tensor *, tensor dummy, int64_t a, int64_t b); +int atg__test_ambiguous_defaults_b(tensor *, tensor dummy, int64_t a, char * b); +int atg__test_optional_filled_intlist(tensor *, tensor values, int64_t *addends_data, int addends_len); +int atg__test_optional_intlist(tensor *, tensor values, int64_t *addends_data, int addends_len); +int atg__test_serialization_subcmul(tensor *, tensor self, tensor other); +int atg__test_string_default(tensor *, tensor dummy, char * a, char * b); +int atg__thnn_differentiable_gru_cell_backward(tensor *, tensor grad_hy, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias); +int atg__thnn_differentiable_lstm_cell_backward(tensor *, tensor grad_hy, tensor grad_cy, tensor input_gates, tensor hidden_gates, tensor input_bias, tensor hidden_bias, tensor cx, tensor cy); +int atg__thnn_fused_gru_cell(tensor *, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias); +int atg__thnn_fused_gru_cell_backward(tensor *, tensor grad_hy, tensor workspace, int has_bias); +int atg__thnn_fused_lstm_cell(tensor *, tensor input_gates, tensor hidden_gates, tensor cx, tensor input_bias, tensor hidden_bias); +int atg__thnn_fused_lstm_cell_backward(tensor *, tensor grad_hy, tensor grad_cy, tensor cx, tensor cy, tensor workspace, int has_bias); +int atg__to_copy(tensor *, tensor self, int options_kind, int options_device, int non_blocking); +// tensor *atg__to_cpu(tensor *tensors_data, int tensors_len); +int atg__to_cpu(tensor *, tensor *tensors_data, int tensors_len); +int atg__trilinear(tensor *, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim); +int atg__unique(tensor *, tensor self, int sorted, int return_inverse); +int atg__unique2(tensor *, tensor self, int sorted, int return_inverse, int return_counts); +int atg__unpack_dual(tensor *, tensor dual, int64_t level); +int atg__unsafe_view(tensor *, tensor self, int64_t *size_data, int size_len); +int atg__values(tensor *, tensor self); +int atg__weight_norm(tensor *, tensor v, tensor g, int64_t dim); +int atg__weight_norm_cuda_interface(tensor *, tensor v, tensor g, int64_t dim); +int atg__weight_norm_cuda_interface_backward(tensor *, tensor grad_w, tensor 
saved_v, tensor saved_g, tensor saved_norms, int64_t dim); +int atg__weight_norm_differentiable_backward(tensor *, tensor grad_w, tensor saved_v, tensor saved_g, tensor saved_norms, int64_t dim); int atg_abs(tensor *, tensor self); int atg_abs_(tensor *, tensor self); int atg_abs_out(tensor *, tensor out, tensor self); +int atg_absolute(tensor *, tensor self); +int atg_absolute_(tensor *, tensor self); +int atg_absolute_out(tensor *, tensor out, tensor self); int atg_acos(tensor *, tensor self); int atg_acos_(tensor *, tensor self); int atg_acos_out(tensor *, tensor out, tensor self); +int atg_acosh(tensor *, tensor self); +int atg_acosh_(tensor *, tensor self); +int atg_acosh_out(tensor *, tensor out, tensor self); int atg_adaptive_avg_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_avg_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_avg_pool3d_backward(tensor *, tensor grad_output, tensor self); -int atg_adaptive_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self); +int atg_adaptive_avg_pool3d_backward(tensor *, tensor grad_input, tensor grad_output, tensor self); int atg_adaptive_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_max_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_max_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_max_pool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); +int atg_adaptive_max_pool2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); int atg_adaptive_max_pool2d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_max_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); int atg_adaptive_max_pool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); +int atg_adaptive_max_pool3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); int atg_adaptive_max_pool3d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); int atg_add(tensor *, tensor self, tensor other); -int atg_add1(tensor *, tensor self, scalar other); int atg_add_(tensor *, tensor self, tensor other); -int atg_add_1(tensor *, tensor self, scalar other); int atg_add_out(tensor *, tensor out, tensor self, tensor other); +int atg_add_scalar(tensor *, tensor self, scalar other); +int atg_add_scalar_(tensor *, tensor self, scalar other); int atg_addbmm(tensor *, tensor self, tensor batch1, tensor batch2); int atg_addbmm_(tensor *, tensor self, tensor batch1, tensor batch2); int atg_addbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); @@ -52,85 +254,160 @@ int atg_align_as(tensor *, tensor self, tensor other); // tensor *atg_align_tensors(tensor *tensors_data, int 
@@ -52,85 +254,160 @@ int atg_align_as(tensor *, tensor self, tensor other);
// tensor *atg_align_tensors(tensor *tensors_data, int tensors_len);
int atg_align_tensors(tensor *, tensor *tensors_data, int tensors_len);
int atg_all(tensor *, tensor self);
-int atg_all1(tensor *, tensor self, int64_t dim, int keepdim);
+int atg_all_all_out(tensor *, tensor out, tensor self);
+int atg_all_dim(tensor *, tensor self, int64_t dim, int keepdim);
int atg_all_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim);
int atg_alpha_dropout(tensor *, tensor input, double p, int train);
int atg_alpha_dropout_(tensor *, tensor self, double p, int train);
+int atg_amax(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_amax_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_amin(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_amin_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_aminmax(tensor *, tensor self, int64_t dim, int keepdim);
+int atg_aminmax_out(tensor *, tensor min, tensor max, tensor self, int64_t dim, int keepdim);
int atg_angle(tensor *, tensor self);
int atg_angle_out(tensor *, tensor out, tensor self);
int atg_any(tensor *, tensor self);
-int atg_any1(tensor *, tensor self, int64_t dim, int keepdim);
+int atg_any_all_out(tensor *, tensor out, tensor self);
+int atg_any_dim(tensor *, tensor self, int64_t dim, int keepdim);
int atg_any_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim);
int atg_arange(tensor *, scalar end, int options_kind, int options_device);
-int atg_arange1(tensor *, scalar start, scalar end, int options_kind, int options_device);
-int atg_arange2(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device);
int atg_arange_out(tensor *, tensor out, scalar end);
-int atg_arange_out1(tensor *, tensor out, scalar start, scalar end);
+int atg_arange_start(tensor *, scalar start, scalar end, int options_kind, int options_device);
+int atg_arange_start_out(tensor *, tensor out, scalar start, scalar end);
+int atg_arange_start_step(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device);
+int atg_arccos(tensor *, tensor self);
+int atg_arccos_(tensor *, tensor self);
+int atg_arccos_out(tensor *, tensor out, tensor self);
+int atg_arccosh(tensor *, tensor self);
+int atg_arccosh_(tensor *, tensor self);
+int atg_arccosh_out(tensor *, tensor out, tensor self);
+int atg_arcsin(tensor *, tensor self);
+int atg_arcsin_(tensor *, tensor self);
+int atg_arcsin_out(tensor *, tensor out, tensor self);
+int atg_arcsinh(tensor *, tensor self);
+int atg_arcsinh_(tensor *, tensor self);
+int atg_arcsinh_out(tensor *, tensor out, tensor self);
+int atg_arctan(tensor *, tensor self);
+int atg_arctan_(tensor *, tensor self);
+int atg_arctan_out(tensor *, tensor out, tensor self);
+int atg_arctanh(tensor *, tensor self);
+int atg_arctanh_(tensor *, tensor self);
+int atg_arctanh_out(tensor *, tensor out, tensor self);
int atg_argmax(tensor *, tensor self, int64_t dim, int keepdim);
+int atg_argmax_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim);
int atg_argmin(tensor *, tensor self, int64_t dim, int keepdim);
+int atg_argmin_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim);
int atg_argsort(tensor *, tensor self, int64_t dim, int descending);
int atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset);
int atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset);
int atg_asin(tensor *, tensor self);
int atg_asin_(tensor *, tensor self);
int atg_asin_out(tensor *, tensor out, tensor self);
+int atg_asinh(tensor *, tensor self);
+int atg_asinh_(tensor *, tensor self);
+int atg_asinh_out(tensor *, tensor out, tensor self);
int atg_atan(tensor *, tensor self);
int atg_atan2(tensor *, tensor self, tensor other);
int atg_atan2_(tensor *, tensor self, tensor other);
int atg_atan2_out(tensor *, tensor out, tensor self, tensor other);
int atg_atan_(tensor *, tensor self);
int atg_atan_out(tensor *, tensor out, tensor self);
+int atg_atanh(tensor *, tensor self);
+int atg_atanh_(tensor *, tensor self);
+int atg_atanh_out(tensor *, tensor out, tensor self);
+int atg_atleast_1d(tensor *, tensor self);
+// tensor *atg_atleast_1d_sequence(tensor *tensors_data, int tensors_len);
+int atg_atleast_1d_sequence(tensor *, tensor *tensors_data, int tensors_len);
+int atg_atleast_2d(tensor *, tensor self);
+// tensor *atg_atleast_2d_sequence(tensor *tensors_data, int tensors_len);
+int atg_atleast_2d_sequence(tensor *, tensor *tensors_data, int tensors_len);
+int atg_atleast_3d(tensor *, tensor self);
+// tensor *atg_atleast_3d_sequence(tensor *tensors_data, int tensors_len);
+int atg_atleast_3d_sequence(tensor *, tensor *tensors_data, int tensors_len);
int atg_avg_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad);
int atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_avg_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
-int atg_avg_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
+int atg_avg_pool2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_avg_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_avg_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
-int atg_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
+int atg_avg_pool3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
int atg_baddbmm(tensor *, tensor self, tensor batch1, tensor batch2);
int atg_baddbmm_(tensor *, tensor self, tensor batch1, tensor batch2);
int atg_baddbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2);
int atg_bartlett_window(tensor *, int64_t window_length, int options_kind, int options_device);
-int atg_bartlett_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_bartlett_window_periodic(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
int atg_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled);
-int atg_batch_norm_backward_elemt(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu);
+int atg_batch_norm_backward_elemt(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu, tensor count);
int atg_batch_norm_backward_reduce(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g);
int atg_batch_norm_elemt(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps);
int atg_batch_norm_elemt_out(tensor *, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps);
int atg_batch_norm_gather_stats(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count);
-int atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len);
+int atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, tensor counts);
int atg_batch_norm_stats(tensor *, tensor input, double eps);
int atg_batch_norm_update_stats(tensor *, tensor input, tensor running_mean, tensor running_var, double momentum);
int atg_bernoulli(tensor *, tensor self);
-int atg_bernoulli1(tensor *, tensor self, double p);
int atg_bernoulli_(tensor *, tensor self, tensor p);
-int atg_bernoulli_1(tensor *, tensor self, double p);
+int atg_bernoulli_float_(tensor *, tensor self, double p);
int atg_bernoulli_out(tensor *, tensor out, tensor self);
+int atg_bernoulli_p(tensor *, tensor self, double p);
int atg_bilinear(tensor *, tensor input1, tensor input2, tensor weight, tensor bias);
int atg_binary_cross_entropy(tensor *, tensor self, tensor target, tensor weight, int64_t reduction);
int atg_binary_cross_entropy_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction);
-int atg_binary_cross_entropy_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction);
+int atg_binary_cross_entropy_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction);
int atg_binary_cross_entropy_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction);
int atg_binary_cross_entropy_with_logits(tensor *, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction);
int atg_binary_cross_entropy_with_logits_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction);
int atg_bincount(tensor *, tensor self, tensor weights, int64_t minlength);
+int atg_binomial(tensor *, tensor count, tensor prob);
+int atg_bitwise_and(tensor *, tensor self, scalar other);
+int atg_bitwise_and_(tensor *, tensor self, scalar other);
+int atg_bitwise_and_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_bitwise_and_tensor(tensor *, tensor self, tensor other);
+int atg_bitwise_and_tensor_(tensor *, tensor self, tensor other);
+int atg_bitwise_and_tensor_out(tensor *, tensor out, tensor self, tensor other);
+int atg_bitwise_left_shift(tensor *, tensor self, tensor other);
+int atg_bitwise_left_shift_(tensor *, tensor self, tensor other);
+int atg_bitwise_left_shift_scalar_tensor(tensor *, scalar self, tensor other);
+int atg_bitwise_left_shift_tensor_out(tensor *, tensor out, tensor self, tensor other);
+int atg_bitwise_left_shift_tensor_scalar(tensor *, tensor self, scalar other);
+int atg_bitwise_left_shift_tensor_scalar_(tensor *, tensor self, scalar other);
+int atg_bitwise_left_shift_tensor_scalar_out(tensor *, tensor out, tensor self, scalar other);
int atg_bitwise_not(tensor *, tensor self);
int atg_bitwise_not_(tensor *, tensor self);
int atg_bitwise_not_out(tensor *, tensor out, tensor self);
+int atg_bitwise_or(tensor *, tensor self, scalar other);
+int atg_bitwise_or_(tensor *, tensor self, scalar other);
+int atg_bitwise_or_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_bitwise_or_tensor(tensor *, tensor self, tensor other);
+int atg_bitwise_or_tensor_(tensor *, tensor self, tensor other);
+int atg_bitwise_or_tensor_out(tensor *, tensor out, tensor self, tensor other);
+int atg_bitwise_right_shift(tensor *, tensor self, tensor other);
+int atg_bitwise_right_shift_(tensor *, tensor self, tensor other);
+int atg_bitwise_right_shift_scalar_tensor(tensor *, scalar self, tensor other);
+int atg_bitwise_right_shift_tensor_out(tensor *, tensor out, tensor self, tensor other);
+int atg_bitwise_right_shift_tensor_scalar(tensor *, tensor self, scalar other);
+int atg_bitwise_right_shift_tensor_scalar_(tensor *, tensor self, scalar other);
+int atg_bitwise_right_shift_tensor_scalar_out(tensor *, tensor out, tensor self, scalar other);
int atg_bitwise_xor(tensor *, tensor self, scalar other);
-int atg_bitwise_xor1(tensor *, tensor self, tensor other);
int atg_bitwise_xor_(tensor *, tensor self, scalar other);
-int atg_bitwise_xor_1(tensor *, tensor self, tensor other);
-int atg_bitwise_xor_out(tensor *, tensor out, tensor self, tensor other);
-int atg_bitwise_xor_out1(tensor *, tensor out, tensor self, scalar other);
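// ---------------------------------------------------------------------------
// [Editor's note; not part of the generated patch.] A pattern visible
// throughout this diff: the old numeric disambiguation suffixes (atg_add1,
// atg_bitwise_xor_out1, ...) are replaced by names derived from the
// overloaded argument types, e.g. atg_add1 -> atg_add_scalar and
// atg_bitwise_xor_out1 -> atg_bitwise_xor_scalar_out, so scalar and tensor
// variants of the same operator can be told apart without counting suffixes.
// ---------------------------------------------------------------------------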
+int atg_bitwise_xor_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_bitwise_xor_tensor(tensor *, tensor self, tensor other);
+int atg_bitwise_xor_tensor_(tensor *, tensor self, tensor other);
+int atg_bitwise_xor_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_blackman_window(tensor *, int64_t window_length, int options_kind, int options_device);
-int atg_blackman_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_blackman_window_periodic(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_block_diag(tensor *, tensor *tensors_data, int tensors_len);
int atg_bmm(tensor *, tensor self, tensor mat2);
int atg_bmm_out(tensor *, tensor out, tensor self, tensor mat2);
// tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len);
int atg_broadcast_tensors(tensor *, tensor *tensors_data, int tensors_len);
+int atg_broadcast_to(tensor *, tensor self, int64_t *size_data, int size_len);
+int atg_bucketize(tensor *, tensor self, tensor boundaries, int out_int32, int right);
+int atg_bucketize_scalar(tensor *, scalar self, tensor boundaries, int out_int32, int right);
+int atg_bucketize_tensor_out(tensor *, tensor out, tensor self, tensor boundaries, int out_int32, int right);
int atg_cartesian_prod(tensor *, tensor *tensors_data, int tensors_len);
int atg_cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim);
int atg_cat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim);
@@ -142,12 +419,15 @@ int atg_ceil_out(tensor *, tensor out, tensor self);
int atg_celu(tensor *, tensor self);
int atg_celu_(tensor *, tensor self);
int atg_chain_matmul(tensor *, tensor *matrices_data, int matrices_len);
+int atg_chain_matmul_out(tensor *, tensor out, tensor *matrices_data, int matrices_len);
+int atg_channel_shuffle(tensor *, tensor self, int64_t groups);
int atg_cholesky(tensor *, tensor self, int upper);
int atg_cholesky_inverse(tensor *, tensor self, int upper);
int atg_cholesky_inverse_out(tensor *, tensor out, tensor self, int upper);
int atg_cholesky_out(tensor *, tensor out, tensor self, int upper);
int atg_cholesky_solve(tensor *, tensor self, tensor input2, int upper);
int atg_cholesky_solve_out(tensor *, tensor out, tensor self, tensor input2, int upper);
+int atg_choose_qparams_optimized(tensor *, tensor input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
// tensor *atg_chunk(tensor self, int64_t chunks, int64_t dim);
int atg_chunk(tensor *, tensor self, int64_t chunks, int64_t dim);
int atg_clamp(tensor *, tensor self, scalar min, scalar max);
@@ -155,24 +435,53 @@ int atg_clamp_(tensor *, tensor self, scalar min, scalar max);
int atg_clamp_max(tensor *, tensor self, scalar max);
int atg_clamp_max_(tensor *, tensor self, scalar max);
int atg_clamp_max_out(tensor *, tensor out, tensor self, scalar max);
+int atg_clamp_max_tensor(tensor *, tensor self, tensor max);
+int atg_clamp_max_tensor_(tensor *, tensor self, tensor max);
+int atg_clamp_max_tensor_out(tensor *, tensor out, tensor self, tensor max);
int atg_clamp_min(tensor *, tensor self, scalar min);
int atg_clamp_min_(tensor *, tensor self, scalar min);
int atg_clamp_min_out(tensor *, tensor out, tensor self, scalar min);
+int atg_clamp_min_tensor(tensor *, tensor self, tensor min);
+int atg_clamp_min_tensor_(tensor *, tensor self, tensor min);
+int atg_clamp_min_tensor_out(tensor *, tensor out, tensor self, tensor min);
int atg_clamp_out(tensor *, tensor out, tensor self, scalar min, scalar max);
+int atg_clamp_tensor(tensor *, tensor self, tensor min, tensor max);
+int atg_clamp_tensor_(tensor *, tensor self, tensor min, tensor max);
+int atg_clamp_tensor_out(tensor *, tensor out, tensor self, tensor min, tensor max);
+int atg_clip(tensor *, tensor self, scalar min, scalar max);
+int atg_clip_(tensor *, tensor self, scalar min, scalar max);
+int atg_clip_out(tensor *, tensor out, tensor self, scalar min, scalar max);
+int atg_clip_tensor(tensor *, tensor self, tensor min, tensor max);
+int atg_clip_tensor_(tensor *, tensor self, tensor min, tensor max);
+int atg_clip_tensor_out(tensor *, tensor out, tensor self, tensor min, tensor max);
int atg_clone(tensor *, tensor self);
int atg_coalesce(tensor *, tensor self);
int atg_col2im(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
int atg_col2im_backward(tensor *, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
-int atg_col2im_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
+int atg_col2im_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
int atg_col2im_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
+int atg_col_indices(tensor *, tensor self);
+int atg_column_stack(tensor *, tensor *tensors_data, int tensors_len);
+int atg_column_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
int atg_combinations(tensor *, tensor self, int64_t r, int with_replacement);
+int atg_complex(tensor *, tensor real, tensor imag);
+int atg_complex_out(tensor *, tensor out, tensor real, tensor imag);
+int atg_concat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim);
+int atg_concat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim);
int atg_conj(tensor *, tensor self);
-int atg_conj_out(tensor *, tensor out, tensor self);
+int atg_conj_physical(tensor *, tensor self);
+int atg_conj_physical_(tensor *, tensor self);
+int atg_conj_physical_out(tensor *, tensor out, tensor self);
int atg_constant_pad_nd(tensor *, tensor self, int64_t *pad_data, int pad_len);
int atg_contiguous(tensor *, tensor self);
int atg_conv1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups);
+int atg_conv1d_padding(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups);
int atg_conv2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups);
+int atg_conv2d_padding(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups);
int atg_conv3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups);
+int atg_conv3d_padding(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups);
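// ---------------------------------------------------------------------------
// [Editor's note; not part of the generated patch.] Some new bindings take C
// string parameters (`char *`), such as the `padding` argument of the
// atg_conv{1,2,3}d_padding entries above. These presumably expect
// NUL-terminated strings like "same" or "valid", mirroring PyTorch's string
// padding mode; a hypothetical call:
//
//   int64_t stride[1] = {1}, dilation[1] = {1};
//   atg_conv1d_padding(out, input, weight, bias, stride, 1, "same", dilation, 1, 1);
// ---------------------------------------------------------------------------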
+int atg_conv_depthwise3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
+int atg_conv_depthwise3d_backward(tensor *, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
int atg_conv_tbc(tensor *, tensor self, tensor weight, tensor bias, int64_t pad);
int atg_conv_tbc_backward(tensor *, tensor self, tensor input, tensor weight, tensor bias, int64_t pad);
int atg_conv_transpose1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len);
@@ -181,6 +490,13 @@ int atg_conv_transpose3d(tensor *, tensor input, tensor weight, tensor bias, int
int atg_convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups);
int atg_convolution_overrideable(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups);
int atg_copy_sparse_to_sparse_(tensor *, tensor self, tensor src, int non_blocking);
+int atg_copysign(tensor *, tensor self, tensor other);
+int atg_copysign_(tensor *, tensor self, tensor other);
+int atg_copysign_out(tensor *, tensor out, tensor self, tensor other);
+int atg_copysign_scalar(tensor *, tensor self, scalar other);
+int atg_copysign_scalar_(tensor *, tensor self, scalar other);
+int atg_copysign_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_corrcoef(tensor *, tensor self);
int atg_cos(tensor *, tensor self);
int atg_cos_(tensor *, tensor self);
int atg_cos_out(tensor *, tensor out, tensor self);
@@ -189,75 +505,124 @@ int atg_cosh_(tensor *, tensor self);
int atg_cosh_out(tensor *, tensor out, tensor self);
int atg_cosine_embedding_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction);
int atg_cosine_similarity(tensor *, tensor x1, tensor x2, int64_t dim, double eps);
+int atg_cov(tensor *, tensor self, int64_t correction, tensor fweights, tensor aweights);
int atg_cross(tensor *, tensor self, tensor other, int64_t dim);
+int atg_cross_entropy_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, double label_smoothing);
int atg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim);
+int atg_crow_indices(tensor *, tensor self);
int atg_ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity);
-int atg_ctc_loss1(tensor *, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity);
+int atg_ctc_loss_tensor(tensor *, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity);
int atg_cudnn_affine_grid_generator(tensor *, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W);
int atg_cudnn_affine_grid_generator_backward(tensor *, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W);
int atg_cudnn_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon);
int atg_cudnn_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace);
-int atg_cudnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
-int atg_cudnn_convolution_backward_bias(tensor *, tensor grad_output);
-int atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
-int atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
-int atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
-int atg_cudnn_convolution_transpose_backward_bias(tensor *, tensor grad_output);
-int atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
-int atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
+int atg_cudnn_convolution(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_add_relu(tensor *, tensor self, tensor weight, tensor z, scalar alpha, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups);
+int atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_deprecated(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
+int atg_cudnn_convolution_deprecated2(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
+int atg_cudnn_convolution_relu(tensor *, tensor self, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups);
+int atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32);
+int atg_cudnn_convolution_transpose_deprecated(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
+int atg_cudnn_convolution_transpose_deprecated2(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic);
int atg_cudnn_grid_sampler(tensor *, tensor self, tensor grid);
int atg_cudnn_grid_sampler_backward(tensor *, tensor self, tensor grid, tensor grad_output);
+int atg_cummax(tensor *, tensor self, int64_t dim);
+int atg_cummax_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim);
+int atg_cummaxmin_backward(tensor *, tensor grad, tensor input, tensor indices, int64_t dim);
+int atg_cummin(tensor *, tensor self, int64_t dim);
+int atg_cummin_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim);
int atg_cumprod(tensor *, tensor self, int64_t dim, int dtype);
+int atg_cumprod_(tensor *, tensor self, int64_t dim, int dtype);
+int atg_cumprod_backward(tensor *, tensor grad, tensor input, int64_t dim, tensor output);
int atg_cumprod_out(tensor *, tensor out, tensor self, int64_t dim, int dtype);
int atg_cumsum(tensor *, tensor self, int64_t dim, int dtype);
+int atg_cumsum_(tensor *, tensor self, int64_t dim, int dtype);
int atg_cumsum_out(tensor *, tensor out, tensor self, int64_t dim, int dtype);
+int atg_cumulative_trapezoid(tensor *, tensor y, int64_t dim);
+int atg_cumulative_trapezoid_x(tensor *, tensor y, tensor x, int64_t dim);
int atg_data(tensor *, tensor self);
+int atg_deg2rad(tensor *, tensor self);
+int atg_deg2rad_(tensor *, tensor self);
+int atg_deg2rad_out(tensor *, tensor out, tensor self);
int atg_dequantize(tensor *, tensor self);
+// tensor *atg_dequantize_tensors(tensor *tensors_data, int tensors_len);
+int atg_dequantize_tensors(tensor *, tensor *tensors_data, int tensors_len);
int atg_det(tensor *, tensor self);
int atg_detach(tensor *, tensor self);
int atg_detach_(tensor *, tensor self);
int atg_diag(tensor *, tensor self, int64_t diagonal);
+int atg_diag_backward(tensor *, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t diagonal);
int atg_diag_embed(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2);
int atg_diag_out(tensor *, tensor out, tensor self, int64_t diagonal);
int atg_diagflat(tensor *, tensor self, int64_t offset);
int atg_diagonal(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2);
+int atg_diagonal_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2);
+int atg_diff(tensor *, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append);
+int atg_diff_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append);
int atg_digamma(tensor *, tensor self);
int atg_digamma_(tensor *, tensor self);
int atg_digamma_out(tensor *, tensor out, tensor self);
int atg_dist(tensor *, tensor self, tensor other);
int atg_div(tensor *, tensor self, tensor other);
-int atg_div1(tensor *, tensor self, scalar other);
int atg_div_(tensor *, tensor self, tensor other);
-int atg_div_1(tensor *, tensor self, scalar other);
int atg_div_out(tensor *, tensor out, tensor self, tensor other);
+int atg_div_out_mode(tensor *, tensor out, tensor self, tensor other, char * rounding_mode);
+int atg_div_scalar(tensor *, tensor self, scalar other);
+int atg_div_scalar_(tensor *, tensor self, scalar other);
+int atg_div_scalar_mode(tensor *, tensor self, scalar other, char * rounding_mode);
+int atg_div_scalar_mode_(tensor *, tensor self, scalar other, char * rounding_mode);
+int atg_div_tensor_mode(tensor *, tensor self, tensor other, char * rounding_mode);
+int atg_div_tensor_mode_(tensor *, tensor self, tensor other, char * rounding_mode);
+int atg_divide(tensor *, tensor self, tensor other);
+int atg_divide_(tensor *, tensor self, tensor other);
+int atg_divide_out(tensor *, tensor out, tensor self, tensor other);
+int atg_divide_out_mode(tensor *, tensor out, tensor self, tensor other, char * rounding_mode);
+int atg_divide_scalar(tensor *, tensor self, scalar other);
+int atg_divide_scalar_(tensor *, tensor self, scalar other);
+int atg_divide_scalar_mode(tensor *, tensor self, scalar other, char * rounding_mode);
+int atg_divide_scalar_mode_(tensor *, tensor self, scalar other, char * rounding_mode);
+int atg_divide_tensor_mode(tensor *, tensor self, tensor other, char * rounding_mode);
+int atg_divide_tensor_mode_(tensor *, tensor self, tensor other, char * rounding_mode);
int atg_dot(tensor *, tensor self, tensor tensor);
int atg_dot_out(tensor *, tensor out, tensor self, tensor tensor);
int atg_dropout(tensor *, tensor input, double p, int train);
int atg_dropout_(tensor *, tensor self, double p, int train);
+// tensor *atg_dsplit(tensor self, int64_t sections);
+int atg_dsplit(tensor *, tensor self, int64_t sections);
+// tensor *atg_dsplit_array(tensor self, int64_t *indices_data, int indices_len);
+int atg_dsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
+int atg_dstack(tensor *, tensor *tensors_data, int tensors_len);
+int atg_dstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
int atg_eig(tensor *, tensor self, int eigenvectors);
-int atg_eig_out(tensor *, tensor e, tensor v, tensor self, int eigenvectors);
+int atg_eig_e(tensor *, tensor e, tensor v, tensor self, int eigenvectors);
+int atg_einsum(tensor *, char * equation, tensor *tensors_data, int tensors_len);
int atg_elu(tensor *, tensor self);
int atg_elu_(tensor *, tensor self);
-int atg_elu_backward(tensor *, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output);
-int atg_elu_backward_out(tensor *, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output);
+int atg_elu_backward(tensor *, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result);
+int atg_elu_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result);
int atg_elu_out(tensor *, tensor out, tensor self);
int atg_embedding(tensor *, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse);
int atg_embedding_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse);
-int atg_embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights);
+int atg_embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset);
+int atg_embedding_bag_padding_idx(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx);
int atg_embedding_dense_backward(tensor *, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq);
int atg_embedding_renorm_(tensor *, tensor self, tensor indices, double max_norm, double norm_type);
int atg_embedding_sparse_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq);
int atg_empty(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
int atg_empty_like(tensor *, tensor self);
-int atg_empty_like1(tensor *, tensor self, int options_kind, int options_device);
int atg_empty_out(tensor *, tensor out, int64_t *size_data, int size_len);
+int atg_empty_quantized(tensor *, int64_t *size_data, int size_len, tensor qtensor, int options_kind, int options_device);
int atg_empty_strided(tensor *, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device);
int atg_eq(tensor *, tensor self, scalar other);
-int atg_eq1(tensor *, tensor self, tensor other);
int atg_eq_(tensor *, tensor self, scalar other);
-int atg_eq_1(tensor *, tensor self, tensor other);
-int atg_eq_out(tensor *, tensor out, tensor self, scalar other);
-int atg_eq_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_eq_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_eq_tensor(tensor *, tensor self, tensor other);
+int atg_eq_tensor_(tensor *, tensor self, tensor other);
+int atg_eq_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_erf(tensor *, tensor self);
int atg_erf_(tensor *, tensor self);
int atg_erf_out(tensor *, tensor out, tensor self);
@@ -268,6 +633,9 @@ int atg_erfinv(tensor *, tensor self);
int atg_erfinv_(tensor *, tensor self);
int atg_erfinv_out(tensor *, tensor out, tensor self);
int atg_exp(tensor *, tensor self);
+int atg_exp2(tensor *, tensor self);
+int atg_exp2_(tensor *, tensor self);
+int atg_exp2_out(tensor *, tensor out, tensor self);
int atg_exp_(tensor *, tensor self);
int atg_exp_out(tensor *, tensor out, tensor self);
int atg_expand(tensor *, tensor self, int64_t *size_data, int size_len, int implicit);
@@ -277,77 +645,157 @@ int atg_expm1_(tensor *, tensor self);
int atg_expm1_out(tensor *, tensor out, tensor self);
int atg_exponential_(tensor *, tensor self, double lambd);
int atg_eye(tensor *, int64_t n, int options_kind, int options_device);
-int atg_eye1(tensor *, int64_t n, int64_t m, int options_kind, int options_device);
+int atg_eye_m(tensor *, int64_t n, int64_t m, int options_kind, int options_device);
+int atg_eye_m_out(tensor *, tensor out, int64_t n, int64_t m);
int atg_eye_out(tensor *, tensor out, int64_t n);
-int atg_eye_out1(tensor *, tensor out, int64_t n, int64_t m);
int atg_fake_quantize_per_channel_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
-int atg_fake_quantize_per_channel_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+int atg_fake_quantize_per_channel_affine_cachemask(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+int atg_fake_quantize_per_channel_affine_cachemask_backward(tensor *, tensor grad, tensor mask);
int atg_fake_quantize_per_tensor_affine(tensor *, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
-int atg_fake_quantize_per_tensor_affine_backward(tensor *, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+int atg_fake_quantize_per_tensor_affine_cachemask(tensor *, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+int atg_fake_quantize_per_tensor_affine_cachemask_backward(tensor *, tensor grad, tensor mask);
+int atg_fake_quantize_per_tensor_affine_tensor_qparams(tensor *, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max);
int atg_fbgemm_linear_fp16_weight(tensor *, tensor input, tensor packed_weight, tensor bias);
int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *, tensor input, tensor packed_weight, tensor bias);
int atg_fbgemm_linear_int8_weight(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias);
int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias);
int atg_fbgemm_pack_gemm_matrix_fp16(tensor *, tensor input);
int atg_fbgemm_pack_quantized_matrix(tensor *, tensor input);
-int atg_fbgemm_pack_quantized_matrix1(tensor *, tensor input, int64_t K, int64_t n);
+int atg_fbgemm_pack_quantized_matrix_kn(tensor *, tensor input, int64_t K, int64_t n);
int atg_feature_alpha_dropout(tensor *, tensor input, double p, int train);
int atg_feature_alpha_dropout_(tensor *, tensor self, double p, int train);
int atg_feature_dropout(tensor *, tensor input, double p, int train);
int atg_feature_dropout_(tensor *, tensor self, double p, int train);
-int atg_fft(tensor *, tensor self, int64_t signal_ndim, int normalized);
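// ---------------------------------------------------------------------------
// [Editor's note; not part of the generated patch.] The single atg_fft entry
// point removed above is replaced by the namespaced atg_fft_* family below,
// mirroring the torch.fft module: each variant takes the transform size(s)
// and dimension(s) explicitly plus a `char * norm` argument, which presumably
// accepts the torch.fft normalization names ("forward", "backward", "ortho").
// ---------------------------------------------------------------------------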
+int atg_fft_fft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_fft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_fft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_fft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_fftfreq(tensor *, int64_t n, double d, int options_kind, int options_device);
+int atg_fft_fftfreq_out(tensor *, tensor out, int64_t n, double d);
+int atg_fft_fftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_fftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_fftshift(tensor *, tensor self, int64_t *dim_data, int dim_len);
+int atg_fft_hfft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_hfft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_ifft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_ifft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_ifft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_ifft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_ifftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_ifftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_ifftshift(tensor *, tensor self, int64_t *dim_data, int dim_len);
+int atg_fft_ihfft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_ihfft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_irfft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_irfft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_irfft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_irfft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_irfftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_irfftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_rfft(tensor *, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_rfft2(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_rfft2_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_rfft_out(tensor *, tensor out, tensor self, int64_t n, int64_t dim, char * norm);
+int atg_fft_rfftfreq(tensor *, int64_t n, double d, int options_kind, int options_device);
+int atg_fft_rfftfreq_out(tensor *, tensor out, int64_t n, double d);
+int atg_fft_rfftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
+int atg_fft_rfftn_out(tensor *, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm);
int atg_fill_(tensor *, tensor self, scalar value);
-int atg_fill_1(tensor *, tensor self, tensor value);
int atg_fill_diagonal_(tensor *, tensor self, scalar fill_value, int wrap);
+int atg_fill_tensor_(tensor *, tensor self, tensor value);
+int atg_fix(tensor *, tensor self);
+int atg_fix_(tensor *, tensor self);
+int atg_fix_out(tensor *, tensor out, tensor self);
int atg_flatten(tensor *, tensor self, int64_t start_dim, int64_t end_dim);
+int atg_flatten_dense_tensors(tensor *, tensor *tensors_data, int tensors_len);
int atg_flip(tensor *, tensor self, int64_t *dims_data, int dims_len);
+int atg_fliplr(tensor *, tensor self);
+int atg_flipud(tensor *, tensor self);
+int atg_float_power(tensor *, tensor self, tensor exponent);
+int atg_float_power_(tensor *, tensor self, scalar exponent);
+int atg_float_power_scalar(tensor *, scalar self, tensor exponent);
+int atg_float_power_scalar_out(tensor *, tensor out, scalar self, tensor exponent);
+int atg_float_power_tensor_(tensor *, tensor self, tensor exponent);
+int atg_float_power_tensor_scalar(tensor *, tensor self, scalar exponent);
+int atg_float_power_tensor_scalar_out(tensor *, tensor out, tensor self, scalar exponent);
+int atg_float_power_tensor_tensor_out(tensor *, tensor out, tensor self, tensor exponent);
int atg_floor(tensor *, tensor self);
int atg_floor_(tensor *, tensor self);
+int atg_floor_divide(tensor *, tensor self, tensor other);
+int atg_floor_divide_(tensor *, tensor self, tensor other);
+int atg_floor_divide_out(tensor *, tensor out, tensor self, tensor other);
+int atg_floor_divide_scalar(tensor *, tensor self, scalar other);
+int atg_floor_divide_scalar_(tensor *, tensor self, scalar other);
int atg_floor_out(tensor *, tensor out, tensor self);
+int atg_fmax(tensor *, tensor self, tensor other);
+int atg_fmax_out(tensor *, tensor out, tensor self, tensor other);
+int atg_fmin(tensor *, tensor self, tensor other);
+int atg_fmin_out(tensor *, tensor out, tensor self, tensor other);
int atg_fmod(tensor *, tensor self, scalar other);
-int atg_fmod1(tensor *, tensor self, tensor other);
int atg_fmod_(tensor *, tensor self, scalar other);
-int atg_fmod_1(tensor *, tensor self, tensor other);
-int atg_fmod_out(tensor *, tensor out, tensor self, scalar other);
-int atg_fmod_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_fmod_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_fmod_tensor(tensor *, tensor self, tensor other);
+int atg_fmod_tensor_(tensor *, tensor self, tensor other);
+int atg_fmod_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_frac(tensor *, tensor self);
int atg_frac_(tensor *, tensor self);
int atg_frac_out(tensor *, tensor out, tensor self);
int atg_fractional_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
int atg_fractional_max_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
-int atg_fractional_max_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
-int atg_fractional_max_pool2d_out(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
+int atg_fractional_max_pool2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
+int atg_fractional_max_pool2d_output(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
int atg_fractional_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
int atg_fractional_max_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
-int atg_fractional_max_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
-int atg_fractional_max_pool3d_out(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
+int atg_fractional_max_pool3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices);
+int atg_fractional_max_pool3d_output(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples);
+int atg_frexp(tensor *, tensor self);
+int atg_frexp_tensor_out(tensor *, tensor mantissa, tensor exponent, tensor self);
int atg_frobenius_norm(tensor *, tensor self);
-int atg_frobenius_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_frobenius_norm_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
int atg_frobenius_norm_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_from_file(tensor *, char * filename, int shared, int64_t size, int options_kind, int options_device);
int atg_full(tensor *, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device);
int atg_full_like(tensor *, tensor self, scalar fill_value);
-int atg_full_like1(tensor *, tensor self, scalar fill_value, int options_kind, int options_device);
int atg_full_out(tensor *, tensor out, int64_t *size_data, int size_len, scalar fill_value);
+int atg_fused_moving_avg_obs_fake_quant(tensor *, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant);
int atg_gather(tensor *, tensor self, int64_t dim, tensor index, int sparse_grad);
+int atg_gather_backward(tensor *, tensor grad, tensor self, int64_t dim, tensor index, int sparse_grad);
int atg_gather_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad);
+int atg_gcd(tensor *, tensor self, tensor other);
+int atg_gcd_(tensor *, tensor self, tensor other);
+int atg_gcd_out(tensor *, tensor out, tensor self, tensor other);
int atg_ge(tensor *, tensor self, scalar other);
-int atg_ge1(tensor *, tensor self, tensor other);
int atg_ge_(tensor *, tensor self, scalar other);
-int atg_ge_1(tensor *, tensor self, tensor other);
-int atg_ge_out(tensor *, tensor out, tensor self, scalar other);
-int atg_ge_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_ge_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_ge_tensor(tensor *, tensor self, tensor other);
+int atg_ge_tensor_(tensor *, tensor self, tensor other);
+int atg_ge_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_gelu(tensor *, tensor self);
int atg_gelu_backward(tensor *, tensor grad, tensor self);
+int atg_gelu_backward_grad_input(tensor *, tensor grad_input, tensor grad, tensor self);
+int atg_gelu_out(tensor *, tensor out, tensor self);
int atg_geometric_(tensor *, tensor self, double p);
int atg_geqrf(tensor *, tensor self);
-int atg_geqrf_out(tensor *, tensor a, tensor tau, tensor self);
+int atg_geqrf_a(tensor *, tensor a, tensor tau, tensor self);
int atg_ger(tensor *, tensor self, tensor vec2);
int atg_ger_out(tensor *, tensor out, tensor self, tensor vec2);
int atg_glu(tensor *, tensor self, int64_t dim);
int atg_glu_backward(tensor *, tensor grad_output, tensor self, int64_t dim);
-int atg_glu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t dim);
+int atg_glu_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t dim);
int atg_glu_out(tensor *, tensor out, tensor self, int64_t dim);
int atg_grad(tensor *, tensor self);
+int atg_greater(tensor *, tensor self, scalar other);
+int atg_greater_(tensor *, tensor self, scalar other);
+int atg_greater_equal(tensor *, tensor self, scalar other);
+int atg_greater_equal_(tensor *, tensor self, scalar other);
+int atg_greater_equal_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_greater_equal_tensor(tensor *, tensor self, tensor other);
+int atg_greater_equal_tensor_(tensor *, tensor self, tensor other);
+int atg_greater_equal_tensor_out(tensor *, tensor out, tensor self, tensor other);
+int atg_greater_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_greater_tensor(tensor *, tensor self, tensor other);
+int atg_greater_tensor_(tensor *, tensor self, tensor other);
+int atg_greater_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_grid_sampler(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners);
int atg_grid_sampler_2d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners);
int atg_grid_sampler_2d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners);
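// ---------------------------------------------------------------------------
// [Editor's note; not part of the generated patch.] Another renaming pattern
// visible throughout: explicit-output backward bindings move from
// *_backward_out to *_backward_grad_input (e.g. atg_glu_backward_grad_input
// above), after the `grad_input` out-tensor, usually with the argument list
// left unchanged.
// ---------------------------------------------------------------------------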
@@ -355,91 +803,221 @@ int atg_grid_sampler_3d(tensor *, tensor input, tensor grid, int64_t interpolati
int atg_grid_sampler_3d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners);
int atg_group_norm(tensor *, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled);
int atg_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
int atg_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
+int atg_gru_data(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
int atg_gt(tensor *, tensor self, scalar other);
-int atg_gt1(tensor *, tensor self, tensor other);
int atg_gt_(tensor *, tensor self, scalar other);
-int atg_gt_1(tensor *, tensor self, tensor other);
-int atg_gt_out(tensor *, tensor out, tensor self, scalar other);
-int atg_gt_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_gt_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_gt_tensor(tensor *, tensor self, tensor other);
+int atg_gt_tensor_(tensor *, tensor self, tensor other);
+int atg_gt_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_hamming_window(tensor *, int64_t window_length, int options_kind, int options_device);
-int atg_hamming_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
-int atg_hamming_window2(tensor *, int64_t window_length, int periodic, double alpha, int options_kind, int options_device);
-int atg_hamming_window3(tensor *, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device);
+int atg_hamming_window_periodic(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_hamming_window_periodic_alpha(tensor *, int64_t window_length, int periodic, double alpha, int options_kind, int options_device);
+int atg_hamming_window_periodic_alpha_beta(tensor *, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device);
int atg_hann_window(tensor *, int64_t window_length, int options_kind, int options_device);
-int atg_hann_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_hann_window_periodic(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
int atg_hardshrink(tensor *, tensor self);
int atg_hardshrink_backward(tensor *, tensor grad_out, tensor self, scalar lambd);
+int atg_hardshrink_backward_grad_input(tensor *, tensor grad_input, tensor grad_out, tensor self, scalar lambd);
+int atg_hardshrink_out(tensor *, tensor out, tensor self);
+int atg_hardsigmoid(tensor *, tensor self);
+int atg_hardsigmoid_(tensor *, tensor self);
+int atg_hardsigmoid_backward(tensor *, tensor grad_output, tensor self);
+int atg_hardsigmoid_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self);
+int atg_hardsigmoid_out(tensor *, tensor out, tensor self);
+int atg_hardswish(tensor *, tensor self);
+int atg_hardswish_(tensor *, tensor self);
+int atg_hardswish_backward(tensor *, tensor grad_output, tensor self);
+int atg_hardswish_out(tensor *, tensor out, tensor self);
int atg_hardtanh(tensor *, tensor self);
int atg_hardtanh_(tensor *, tensor self);
int atg_hardtanh_backward(tensor *, tensor grad_output, tensor self, scalar min_val, scalar max_val);
-int atg_hardtanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val);
+int atg_hardtanh_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val);
int atg_hardtanh_out(tensor *, tensor out, tensor self);
+int atg_heaviside(tensor *, tensor self, tensor values);
+int atg_heaviside_(tensor *, tensor self, tensor values);
+int atg_heaviside_out(tensor *, tensor out, tensor self, tensor values);
int atg_hinge_embedding_loss(tensor *, tensor self, tensor target, double margin, int64_t reduction);
int atg_histc(tensor *, tensor self, int64_t bins);
int atg_histc_out(tensor *, tensor out, tensor self, int64_t bins);
+// tensor *atg_hsplit(tensor self, int64_t sections);
+int atg_hsplit(tensor *, tensor self, int64_t sections);
+// tensor *atg_hsplit_array(tensor self, int64_t *indices_data, int indices_len);
+int atg_hsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
int atg_hspmm(tensor *, tensor mat1, tensor mat2);
int atg_hspmm_out(tensor *, tensor out, tensor mat1, tensor mat2);
-int atg_ifft(tensor *, tensor self, int64_t signal_ndim, int normalized);
+int atg_hstack(tensor *, tensor *tensors_data, int tensors_len);
+int atg_hstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
+int atg_huber_loss(tensor *, tensor self, tensor target, int64_t reduction, double delta);
+int atg_huber_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta);
+int atg_huber_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta);
+int atg_huber_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction, double delta);
+int atg_hypot(tensor *, tensor self, tensor other);
+int atg_hypot_(tensor *, tensor self, tensor other);
+int atg_hypot_out(tensor *, tensor out, tensor self, tensor other);
+int atg_i0(tensor *, tensor self);
+int atg_i0_(tensor *, tensor self);
+int atg_i0_out(tensor *, tensor out, tensor self);
+int atg_igamma(tensor *, tensor self, tensor other);
+int atg_igamma_(tensor *, tensor self, tensor other);
+int atg_igamma_out(tensor *, tensor out, tensor self, tensor other);
+int atg_igammac(tensor *, tensor self, tensor other);
+int atg_igammac_(tensor *, tensor self, tensor other);
+int atg_igammac_out(tensor *, tensor out, tensor self, tensor other);
int atg_im2col(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
int atg_im2col_backward(tensor *, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
-int atg_im2col_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
+int atg_im2col_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
int atg_im2col_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len);
int atg_imag(tensor *, tensor self);
-int atg_imag_out(tensor *, tensor out, tensor self);
int atg_index(tensor *, tensor self, tensor *indices_data, int indices_len);
int atg_index_add(tensor *, tensor self, int64_t dim, tensor index, tensor source);
int atg_index_add_(tensor *, tensor self, int64_t dim, tensor index, tensor source);
+int atg_index_add_alpha(tensor *, tensor self, int64_t dim, tensor index, tensor source, scalar alpha);
+int atg_index_add_alpha_(tensor *, tensor self, int64_t dim, tensor index, tensor source, scalar alpha);
int atg_index_copy(tensor *, tensor self, int64_t dim, tensor index, tensor source);
int atg_index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source);
int atg_index_fill(tensor *, tensor self, int64_t dim, tensor index, scalar value);
-int atg_index_fill1(tensor *, tensor self, int64_t dim, tensor index, tensor value);
int atg_index_fill_(tensor *, tensor self, int64_t dim, tensor index, scalar value);
-int atg_index_fill_1(tensor *, tensor self, int64_t dim, tensor index, tensor value);
+int atg_index_fill_int_tensor(tensor *, tensor self, int64_t dim, tensor index, tensor value);
+int atg_index_fill_int_tensor_(tensor *, tensor self, int64_t dim, tensor index, tensor value);
int atg_index_put(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate);
int atg_index_put_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate);
int atg_index_select(tensor *, tensor self, int64_t dim, tensor index);
+int atg_index_select_backward(tensor *, tensor grad, int64_t *self_sizes_data, int self_sizes_len, int64_t dim, tensor index);
int atg_index_select_out(tensor *, tensor out, tensor self, int64_t dim, tensor index);
int atg_indices(tensor *, tensor self);
+int atg_infinitely_differentiable_gelu_backward(tensor *, tensor grad, tensor self);
+int atg_inner(tensor *, tensor self, tensor other);
+int atg_inner_out(tensor *, tensor out, tensor self, tensor other);
int atg_instance_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled);
int atg_int_repr(tensor *, tensor self);
int atg_inverse(tensor *, tensor self);
int atg_inverse_out(tensor *, tensor out, tensor self);
-int atg_irfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len);
int atg_isclose(tensor *, tensor self, tensor other, double rtol, double atol, int equal_nan);
int atg_isfinite(tensor *, tensor self);
+int atg_isin(tensor *, tensor elements, tensor test_elements, int assume_unique, int invert);
+int atg_isin_scalar_tensor(tensor *, scalar element, tensor test_elements, int assume_unique, int invert);
+int atg_isin_scalar_tensor_out(tensor *, tensor out, scalar element, tensor test_elements, int assume_unique, int invert);
+int atg_isin_tensor_scalar(tensor *, tensor elements, scalar test_element, int assume_unique, int invert);
+int atg_isin_tensor_scalar_out(tensor *, tensor out, tensor elements, scalar test_element, int assume_unique, int invert);
+int atg_isin_tensor_tensor_out(tensor *, tensor out, tensor elements, tensor test_elements, int assume_unique, int invert);
+int atg_isinf(tensor *, tensor self);
int atg_isnan(tensor *, tensor self);
-int atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction);
-int atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
+int atg_isneginf(tensor *, tensor self);
+int atg_isneginf_out(tensor *, tensor out, tensor self);
+int atg_isposinf(tensor *, tensor self);
+int atg_isposinf_out(tensor *, tensor out, tensor self);
+int atg_isreal(tensor *, tensor self);
+int atg_istft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int center, int normalized, int onesided, int64_t length, int return_complex);
+int atg_kaiser_window(tensor *, int64_t window_length, int options_kind, int options_device);
+int atg_kaiser_window_beta(tensor *, int64_t window_length, int periodic, double beta, int options_kind, int options_device);
+int atg_kaiser_window_periodic(tensor *, int64_t window_length, int periodic, int options_kind, int options_device);
+int atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction, int log_target);
+int atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, int log_target);
+int atg_kron(tensor *, tensor self, tensor other);
+int atg_kron_out(tensor *, tensor out, tensor self, tensor other);
int atg_kthvalue(tensor *, tensor self, int64_t k, int64_t dim, int keepdim);
-int atg_kthvalue_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim);
+int atg_kthvalue_values(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim);
int atg_l1_loss(tensor *, tensor self, tensor target, int64_t reduction);
int atg_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
+int atg_l1_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
int atg_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
int atg_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable);
+int atg_lcm(tensor *, tensor self, tensor other);
+int atg_lcm_(tensor *, tensor self, tensor other);
+int atg_lcm_out(tensor *, tensor out, tensor self, tensor other);
+int atg_ldexp(tensor *, tensor self, tensor other);
+int atg_ldexp_(tensor *, tensor self, tensor other);
+int atg_ldexp_out(tensor *, tensor out, tensor self, tensor other);
int atg_le(tensor *, tensor self, scalar other);
-int atg_le1(tensor *, tensor self, tensor other);
int atg_le_(tensor *, tensor self, scalar other);
-int atg_le_1(tensor *, tensor self, tensor other);
-int atg_le_out(tensor *, tensor out, tensor self, scalar other);
-int atg_le_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_le_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_le_tensor(tensor *, tensor self, tensor other);
+int atg_le_tensor_(tensor *, tensor self, tensor other);
+int atg_le_tensor_out(tensor *, tensor out, tensor self, tensor other);
int atg_leaky_relu(tensor *, tensor self);
int atg_leaky_relu_(tensor *, tensor self);
-int atg_leaky_relu_backward(tensor *, tensor grad_output,
tensor self, scalar negative_slope); -int atg_leaky_relu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope); +int atg_leaky_relu_backward(tensor *, tensor grad_output, tensor self, scalar negative_slope, int self_is_result); +int atg_leaky_relu_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope, int self_is_result); int atg_leaky_relu_out(tensor *, tensor out, tensor self); int atg_lerp(tensor *, tensor self, tensor end, scalar weight); -int atg_lerp1(tensor *, tensor self, tensor end, tensor weight); int atg_lerp_(tensor *, tensor self, tensor end, scalar weight); -int atg_lerp_1(tensor *, tensor self, tensor end, tensor weight); -int atg_lerp_out(tensor *, tensor out, tensor self, tensor end, scalar weight); -int atg_lerp_out1(tensor *, tensor out, tensor self, tensor end, tensor weight); +int atg_lerp_scalar_out(tensor *, tensor out, tensor self, tensor end, scalar weight); +int atg_lerp_tensor(tensor *, tensor self, tensor end, tensor weight); +int atg_lerp_tensor_(tensor *, tensor self, tensor end, tensor weight); +int atg_lerp_tensor_out(tensor *, tensor out, tensor self, tensor end, tensor weight); +int atg_less(tensor *, tensor self, scalar other); +int atg_less_(tensor *, tensor self, scalar other); +int atg_less_equal(tensor *, tensor self, scalar other); +int atg_less_equal_(tensor *, tensor self, scalar other); +int atg_less_equal_scalar_out(tensor *, tensor out, tensor self, scalar other); +int atg_less_equal_tensor(tensor *, tensor self, tensor other); +int atg_less_equal_tensor_(tensor *, tensor self, tensor other); +int atg_less_equal_tensor_out(tensor *, tensor out, tensor self, tensor other); +int atg_less_scalar_out(tensor *, tensor out, tensor self, scalar other); +int atg_less_tensor(tensor *, tensor self, tensor other); +int atg_less_tensor_(tensor *, tensor self, tensor other); +int atg_less_tensor_out(tensor *, tensor out, tensor self, tensor other); int atg_lgamma(tensor *, tensor self); int atg_lgamma_(tensor *, tensor self); int atg_lgamma_out(tensor *, tensor out, tensor self); +int atg_linalg_cholesky(tensor *, tensor self, int upper); +int atg_linalg_cholesky_ex(tensor *, tensor self, int upper, int check_errors); +int atg_linalg_cholesky_ex_l(tensor *, tensor L, tensor info, tensor self, int upper, int check_errors); +int atg_linalg_cholesky_out(tensor *, tensor out, tensor self, int upper); +int atg_linalg_cond(tensor *, tensor self, scalar p); +int atg_linalg_cond_out(tensor *, tensor out, tensor self, scalar p); +int atg_linalg_cond_p_str(tensor *, tensor self, char * p); +int atg_linalg_cond_p_str_out(tensor *, tensor out, tensor self, char * p); +int atg_linalg_det(tensor *, tensor self); +int atg_linalg_det_out(tensor *, tensor out, tensor self); +int atg_linalg_eig(tensor *, tensor self); +int atg_linalg_eig_out(tensor *, tensor eigenvalues, tensor eigenvectors, tensor self); +int atg_linalg_eigh(tensor *, tensor self, char * UPLO); +int atg_linalg_eigh_eigvals(tensor *, tensor eigvals, tensor eigvecs, tensor self, char * UPLO); +int atg_linalg_eigvals(tensor *, tensor self); +int atg_linalg_eigvals_out(tensor *, tensor out, tensor self); +int atg_linalg_eigvalsh(tensor *, tensor self, char * UPLO); +int atg_linalg_eigvalsh_out(tensor *, tensor out, tensor self, char * UPLO); +int atg_linalg_householder_product(tensor *, tensor input, tensor tau); +int atg_linalg_householder_product_out(tensor *, tensor out, tensor input, tensor tau); +int 
atg_linalg_inv(tensor *, tensor self); +int atg_linalg_inv_ex(tensor *, tensor self, int check_errors); +int atg_linalg_inv_ex_inverse(tensor *, tensor inverse, tensor info, tensor self, int check_errors); +int atg_linalg_inv_out(tensor *, tensor out, tensor self); +int atg_linalg_lstsq(tensor *, tensor self, tensor b, double rcond, char * driver); +int atg_linalg_lstsq_out(tensor *, tensor solution, tensor residuals, tensor rank, tensor singular_values, tensor self, tensor b, double rcond, char * driver); +int atg_linalg_matmul(tensor *, tensor self, tensor other); +int atg_linalg_matmul_out(tensor *, tensor out, tensor self, tensor other); +int atg_linalg_matrix_power(tensor *, tensor self, int64_t n); +int atg_linalg_matrix_power_out(tensor *, tensor out, tensor self, int64_t n); +int atg_linalg_matrix_rank(tensor *, tensor self, double tol, int hermitian); +int atg_linalg_matrix_rank_out(tensor *, tensor out, tensor self, double tol, int hermitian); +int atg_linalg_matrix_rank_out_tol_tensor(tensor *, tensor out, tensor input, tensor tol, int hermitian); +int atg_linalg_matrix_rank_tol_tensor(tensor *, tensor input, tensor tol, int hermitian); +int atg_linalg_multi_dot(tensor *, tensor *tensors_data, int tensors_len); +int atg_linalg_multi_dot_out(tensor *, tensor out, tensor *tensors_data, int tensors_len); +int atg_linalg_pinv(tensor *, tensor self, double rcond, int hermitian); +int atg_linalg_pinv_out(tensor *, tensor out, tensor self, double rcond, int hermitian); +int atg_linalg_pinv_out_rcond_tensor(tensor *, tensor out, tensor self, tensor rcond, int hermitian); +int atg_linalg_pinv_rcond_tensor(tensor *, tensor self, tensor rcond, int hermitian); +int atg_linalg_qr(tensor *, tensor self, char * mode); +int atg_linalg_qr_out(tensor *, tensor Q, tensor R, tensor self, char * mode); +int atg_linalg_slogdet(tensor *, tensor self); +int atg_linalg_slogdet_out(tensor *, tensor sign, tensor logabsdet, tensor self); +int atg_linalg_solve(tensor *, tensor input, tensor other); +int atg_linalg_solve_out(tensor *, tensor out, tensor input, tensor other); +int atg_linalg_svd(tensor *, tensor self, int full_matrices); +int atg_linalg_svd_u(tensor *, tensor U, tensor S, tensor Vh, tensor self, int full_matrices); +int atg_linalg_svdvals(tensor *, tensor input); +int atg_linalg_svdvals_out(tensor *, tensor out, tensor input); +int atg_linalg_tensorinv(tensor *, tensor self, int64_t ind); +int atg_linalg_tensorinv_out(tensor *, tensor out, tensor self, int64_t ind); +int atg_linalg_tensorsolve(tensor *, tensor self, tensor other, int64_t *dims_data, int dims_len); +int atg_linalg_tensorsolve_out(tensor *, tensor out, tensor self, tensor other, int64_t *dims_data, int dims_len); int atg_linear(tensor *, tensor input, tensor weight, tensor bias); +int atg_linear_out(tensor *, tensor out, tensor input, tensor weight, tensor bias); int atg_linspace(tensor *, scalar start, scalar end, int64_t steps, int options_kind, int options_device); int atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps); int atg_log(tensor *, tensor self); @@ -457,87 +1035,114 @@ int atg_log_normal_(tensor *, tensor self, double mean, double std); int atg_log_out(tensor *, tensor out, tensor self); int atg_log_sigmoid(tensor *, tensor self); int atg_log_sigmoid_backward(tensor *, tensor grad_output, tensor self, tensor buffer); -int atg_log_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor buffer); +int atg_log_sigmoid_backward_grad_input(tensor *, tensor 
grad_input, tensor grad_output, tensor self, tensor buffer); int atg_log_sigmoid_out(tensor *, tensor out, tensor self); int atg_log_softmax(tensor *, tensor self, int64_t dim, int dtype); +int atg_logaddexp(tensor *, tensor self, tensor other); +int atg_logaddexp2(tensor *, tensor self, tensor other); +int atg_logaddexp2_out(tensor *, tensor out, tensor self, tensor other); +int atg_logaddexp_out(tensor *, tensor out, tensor self, tensor other); +int atg_logcumsumexp(tensor *, tensor self, int64_t dim); +int atg_logcumsumexp_out(tensor *, tensor out, tensor self, int64_t dim); int atg_logdet(tensor *, tensor self); +int atg_logical_and(tensor *, tensor self, tensor other); +int atg_logical_and_(tensor *, tensor self, tensor other); +int atg_logical_and_out(tensor *, tensor out, tensor self, tensor other); int atg_logical_not(tensor *, tensor self); int atg_logical_not_(tensor *, tensor self); int atg_logical_not_out(tensor *, tensor out, tensor self); +int atg_logical_or(tensor *, tensor self, tensor other); +int atg_logical_or_(tensor *, tensor self, tensor other); +int atg_logical_or_out(tensor *, tensor out, tensor self, tensor other); int atg_logical_xor(tensor *, tensor self, tensor other); int atg_logical_xor_(tensor *, tensor self, tensor other); int atg_logical_xor_out(tensor *, tensor out, tensor self, tensor other); +int atg_logit(tensor *, tensor self, double eps); +int atg_logit_(tensor *, tensor self, double eps); +int atg_logit_backward(tensor *, tensor grad_output, tensor self, double eps); +int atg_logit_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, double eps); +int atg_logit_out(tensor *, tensor out, tensor self, double eps); int atg_logspace(tensor *, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device); int atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps, double base); int atg_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); int atg_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); int atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -int atg_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); int atg_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh); +int atg_lstm_data(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); int atg_lstsq(tensor *, tensor self, tensor A); -int atg_lstsq_out(tensor *, tensor X, tensor qr, tensor self, tensor A); +int atg_lstsq_x(tensor *, tensor X, tensor qr, tensor self, tensor A); int atg_lt(tensor *, tensor self, scalar other); -int atg_lt1(tensor *, tensor self, tensor other); int atg_lt_(tensor *, tensor self, scalar other); -int atg_lt_1(tensor *, tensor self, tensor other); -int atg_lt_out(tensor *, tensor out, tensor self, scalar other); -int atg_lt_out1(tensor *, tensor out, tensor self, tensor other); +int atg_lt_scalar_out(tensor *, tensor out, tensor self, scalar other); +int atg_lt_tensor(tensor *, tensor self, tensor other); +int atg_lt_tensor_(tensor *, 
tensor self, tensor other); +int atg_lt_tensor_out(tensor *, tensor out, tensor self, tensor other); int atg_lu_solve(tensor *, tensor self, tensor LU_data, tensor LU_pivots); int atg_lu_solve_out(tensor *, tensor out, tensor self, tensor LU_data, tensor LU_pivots); +int atg_lu_unpack(tensor *, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots); +int atg_lu_unpack_out(tensor *, tensor P, tensor L, tensor U, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots); int atg_margin_ranking_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction); int atg_masked_fill(tensor *, tensor self, tensor mask, scalar value); -int atg_masked_fill1(tensor *, tensor self, tensor mask, tensor value); int atg_masked_fill_(tensor *, tensor self, tensor mask, scalar value); -int atg_masked_fill_1(tensor *, tensor self, tensor mask, tensor value); +int atg_masked_fill_tensor(tensor *, tensor self, tensor mask, tensor value); +int atg_masked_fill_tensor_(tensor *, tensor self, tensor mask, tensor value); int atg_masked_scatter(tensor *, tensor self, tensor mask, tensor source); int atg_masked_scatter_(tensor *, tensor self, tensor mask, tensor source); int atg_masked_select(tensor *, tensor self, tensor mask); +int atg_masked_select_backward(tensor *, tensor grad, tensor input, tensor mask); int atg_masked_select_out(tensor *, tensor out, tensor self, tensor mask); int atg_matmul(tensor *, tensor self, tensor other); int atg_matmul_out(tensor *, tensor out, tensor self, tensor other); +int atg_matrix_exp(tensor *, tensor self); +int atg_matrix_exp_backward(tensor *, tensor self, tensor grad); int atg_matrix_power(tensor *, tensor self, int64_t n); +int atg_matrix_power_out(tensor *, tensor out, tensor self, int64_t n); int atg_matrix_rank(tensor *, tensor self, int symmetric); -int atg_matrix_rank1(tensor *, tensor self, double tol, int symmetric); +int atg_matrix_rank_tol(tensor *, tensor self, double tol, int symmetric); int atg_max(tensor *, tensor self); -int atg_max1(tensor *, tensor self, tensor other); -int atg_max2(tensor *, tensor self, int64_t dim, int keepdim); +int atg_max_dim(tensor *, tensor self, int64_t dim, int keepdim); +int atg_max_dim_max(tensor *, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim); +int atg_max_other(tensor *, tensor self, tensor other); int atg_max_out(tensor *, tensor out, tensor self, tensor other); -int atg_max_out1(tensor *, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim); int atg_max_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool1d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool2d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool2d_with_indices_backward(tensor *, tensor grad_output, 
tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); -int atg_max_pool2d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); +int atg_max_pool2d_with_indices_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); int atg_max_pool2d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool3d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_pool3d_with_indices_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); -int atg_max_pool3d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); +int atg_max_pool3d_with_indices_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); int atg_max_pool3d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_max_unpool2d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); int atg_max_unpool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); -int atg_max_unpool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); +int atg_max_unpool2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); int atg_max_unpool2d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); int atg_max_unpool3d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int 
output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); int atg_max_unpool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_unpool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); +int atg_max_unpool3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); int atg_max_unpool3d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +int atg_maximum(tensor *, tensor self, tensor other); +int atg_maximum_out(tensor *, tensor out, tensor self, tensor other); int atg_mean(tensor *, tensor self, int dtype); -int atg_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_mean_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); int atg_mean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); int atg_median(tensor *, tensor self); -int atg_median1(tensor *, tensor self, int64_t dim, int keepdim); -int atg_median_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +int atg_median_dim(tensor *, tensor self, int64_t dim, int keepdim); +int atg_median_dim_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); // tensor *atg_meshgrid(tensor *tensors_data, int tensors_len); int atg_meshgrid(tensor *, tensor *tensors_data, int tensors_len); +// tensor *atg_meshgrid_indexing(tensor *tensors_data, int tensors_len, char * indexing); +int atg_meshgrid_indexing(tensor *, tensor *tensors_data, int tensors_len, char * indexing); int atg_min(tensor *, tensor self); -int atg_min1(tensor *, tensor self, tensor other); -int atg_min2(tensor *, tensor self, int64_t dim, int keepdim); +int atg_min_dim(tensor *, tensor self, int64_t dim, int keepdim); +int atg_min_dim_min(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); +int atg_min_other(tensor *, tensor self, tensor other); int atg_min_out(tensor *, tensor out, tensor self, tensor other); -int atg_min_out1(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); -int atg_min_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +int atg_minimum(tensor *, tensor self, tensor other); +int atg_minimum_out(tensor *, tensor out, tensor self, tensor other); int atg_miopen_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); int atg_miopen_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon); int atg_miopen_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int 
stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); @@ -551,254 +1156,369 @@ int atg_miopen_depthwise_convolution(tensor *, tensor self, tensor weight, tenso int atg_miopen_depthwise_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); int atg_miopen_depthwise_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); int atg_miopen_rnn(tensor *, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state); +int atg_mish(tensor *, tensor self); +int atg_mish_(tensor *, tensor self); +int atg_mish_backward(tensor *, tensor grad_output, tensor self); +int atg_mish_out(tensor *, tensor out, tensor self); int atg_mkldnn_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +int atg_mkldnn_adaptive_avg_pool2d_backward(tensor *, tensor grad_output, tensor self); int atg_mkldnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); int atg_mkldnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); int atg_mkldnn_convolution_backward_weights(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); -int atg_mkldnn_linear(tensor *, tensor input, tensor weight, tensor bias); +int atg_mkldnn_linear(tensor *, tensor self, tensor weight, tensor bias); +int atg_mkldnn_linear_backward_input(tensor *, int64_t *input_size_data, int input_size_len, tensor grad_output, tensor weight); +int atg_mkldnn_linear_backward_weights(tensor *, tensor grad_output, tensor input, tensor weight, int bias_defined); int atg_mkldnn_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +int atg_mkldnn_max_pool2d_backward(tensor *, tensor grad_output, tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +int atg_mkldnn_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +int atg_mkldnn_max_pool3d_backward(tensor *, tensor grad_output, 
tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_mkldnn_reorder_conv2d_weight(tensor *, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); +int atg_mkldnn_reorder_conv3d_weight(tensor *, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); int atg_mm(tensor *, tensor self, tensor mat2); int atg_mm_out(tensor *, tensor out, tensor self, tensor mat2); int atg_mode(tensor *, tensor self, int64_t dim, int keepdim); -int atg_mode_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +int atg_mode_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +int atg_moveaxis(tensor *, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len); +int atg_moveaxis_int(tensor *, tensor self, int64_t source, int64_t destination); +int atg_movedim(tensor *, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len); +int atg_movedim_int(tensor *, tensor self, int64_t source, int64_t destination); int atg_mse_loss(tensor *, tensor self, tensor target, int64_t reduction); int atg_mse_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_mse_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); +int atg_mse_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); int atg_mse_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); +int atg_msort(tensor *, tensor self); +int atg_msort_out(tensor *, tensor out, tensor self); int atg_mul(tensor *, tensor self, tensor other); -int atg_mul1(tensor *, tensor self, scalar other); int atg_mul_(tensor *, tensor self, tensor other); -int atg_mul_1(tensor *, tensor self, scalar other); int atg_mul_out(tensor *, tensor out, tensor self, tensor other); +int atg_mul_scalar(tensor *, tensor self, scalar other); +int atg_mul_scalar_(tensor *, tensor self, scalar other); int atg_multi_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction); -int atg_multi_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction); +int atg_multi_margin_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction); int atg_multilabel_margin_loss(tensor *, tensor self, tensor target, int64_t reduction); int atg_multilabel_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target); -int atg_multilabel_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target); +int atg_multilabel_margin_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target); int atg_multilabel_margin_loss_out(tensor *, tensor out, tensor 
self, tensor target, int64_t reduction); int atg_multinomial(tensor *, tensor self, int64_t num_samples, int replacement); int atg_multinomial_out(tensor *, tensor out, tensor self, int64_t num_samples, int replacement); +int atg_multiply(tensor *, tensor self, tensor other); +int atg_multiply_(tensor *, tensor self, tensor other); +int atg_multiply_out(tensor *, tensor out, tensor self, tensor other); +int atg_multiply_scalar(tensor *, tensor self, scalar other); +int atg_multiply_scalar_(tensor *, tensor self, scalar other); int atg_mv(tensor *, tensor self, tensor vec); int atg_mv_out(tensor *, tensor out, tensor self, tensor vec); int atg_mvlgamma(tensor *, tensor self, int64_t p); int atg_mvlgamma_(tensor *, tensor self, int64_t p); +int atg_mvlgamma_out(tensor *, tensor out, tensor self, int64_t p); +int atg_nan_to_num(tensor *, tensor self, double nan, double posinf, double neginf); +int atg_nan_to_num_(tensor *, tensor self, double nan, double posinf, double neginf); +int atg_nan_to_num_out(tensor *, tensor out, tensor self, double nan, double posinf, double neginf); +int atg_nanmean(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_nanmean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_nanmedian(tensor *, tensor self); +int atg_nanmedian_dim(tensor *, tensor self, int64_t dim, int keepdim); +int atg_nanmedian_dim_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +int atg_nanquantile(tensor *, tensor self, tensor q, int64_t dim, int keepdim); +int atg_nanquantile_new(tensor *, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation); +int atg_nanquantile_new_out(tensor *, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation); +int atg_nanquantile_new_scalar(tensor *, tensor self, double q, int64_t dim, int keepdim, char * interpolation); +int atg_nanquantile_new_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation); +int atg_nanquantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim, int keepdim); +int atg_nanquantile_scalar(tensor *, tensor self, double q, int64_t dim, int keepdim); +int atg_nanquantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim, int keepdim); +int atg_nansum(tensor *, tensor self, int dtype); +int atg_nansum_dim_intlist(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_nansum_intlist_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); int atg_narrow(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); int atg_narrow_copy(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); +int atg_narrow_copy_out(tensor *, tensor out, tensor self, int64_t dim, int64_t start, int64_t length); +int atg_narrow_tensor(tensor *, tensor self, int64_t dim, tensor start, int64_t length); int atg_native_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); -int atg_native_layer_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps); +int atg_native_batch_norm_out(tensor *, tensor out, tensor save_mean, tensor save_invstd, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); +int atg_native_group_norm(tensor *, 
tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps); +int atg_native_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps); int atg_native_norm(tensor *, tensor self); +int atg_native_norm_scalaropt_dim_dtype(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); int atg_ne(tensor *, tensor self, scalar other); -int atg_ne1(tensor *, tensor self, tensor other); int atg_ne_(tensor *, tensor self, scalar other); -int atg_ne_1(tensor *, tensor self, tensor other); -int atg_ne_out(tensor *, tensor out, tensor self, scalar other); -int atg_ne_out1(tensor *, tensor out, tensor self, tensor other); +int atg_ne_scalar_out(tensor *, tensor out, tensor self, scalar other); +int atg_ne_tensor(tensor *, tensor self, tensor other); +int atg_ne_tensor_(tensor *, tensor self, tensor other); +int atg_ne_tensor_out(tensor *, tensor out, tensor self, tensor other); int atg_neg(tensor *, tensor self); int atg_neg_(tensor *, tensor self); int atg_neg_out(tensor *, tensor out, tensor self); +int atg_negative(tensor *, tensor self); +int atg_negative_(tensor *, tensor self); +int atg_negative_out(tensor *, tensor out, tensor self); int atg_new_empty(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); +int atg_new_empty_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device); int atg_new_full(tensor *, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); +int atg_new_ones(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); int atg_new_zeros(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); +int atg_nextafter(tensor *, tensor self, tensor other); +int atg_nextafter_(tensor *, tensor self, tensor other); +int atg_nextafter_out(tensor *, tensor out, tensor self, tensor other); int atg_nll_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); int atg_nll_loss2d(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); int atg_nll_loss2d_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); +int atg_nll_loss2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); int atg_nll_loss2d_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); int atg_nll_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); +int atg_nll_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor 
total_weight); +int atg_nll_loss_nd(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); int atg_nll_loss_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); int atg_nonzero(tensor *, tensor self); // tensor *atg_nonzero_numpy(tensor self); int atg_nonzero_numpy(tensor *, tensor self); int atg_nonzero_out(tensor *, tensor out, tensor self); int atg_norm(tensor *, tensor self); -int atg_norm1(tensor *, tensor self, scalar p, int dtype); -int atg_norm2(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim); -int atg_norm3(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_norm_dtype_out(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); int atg_norm_except_dim(tensor *, tensor v, int64_t pow, int64_t dim); int atg_norm_out(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim); -int atg_norm_out1(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_norm_scalaropt_dim(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim); +int atg_norm_scalaropt_dim_dtype(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); +int atg_norm_scalaropt_dtype(tensor *, tensor self, scalar p, int dtype); +int atg_normal(tensor *, tensor out, tensor mean, double std); int atg_normal_(tensor *, tensor self, double mean, double std); -int atg_normal_out(tensor *, tensor out, tensor mean, double std); -int atg_normal_out1(tensor *, tensor out, double mean, tensor std); -int atg_normal_out2(tensor *, tensor out, tensor mean, tensor std); -int atg_normal_out3(tensor *, tensor out, double mean, double std, int64_t *size_data, int size_len); +int atg_normal_float_float_out(tensor *, tensor out, double mean, double std, int64_t *size_data, int size_len); +int atg_normal_float_tensor_out(tensor *, tensor out, double mean, tensor std); +int atg_normal_tensor_tensor_out(tensor *, tensor out, tensor mean, tensor std); +int atg_not_equal(tensor *, tensor self, scalar other); +int atg_not_equal_(tensor *, tensor self, scalar other); +int atg_not_equal_scalar_out(tensor *, tensor out, tensor self, scalar other); +int atg_not_equal_tensor(tensor *, tensor self, tensor other); +int atg_not_equal_tensor_(tensor *, tensor self, tensor other); +int atg_not_equal_tensor_out(tensor *, tensor out, tensor self, tensor other); int atg_nuclear_norm(tensor *, tensor self, int keepdim); -int atg_nuclear_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +int atg_nuclear_norm_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +int atg_nuclear_norm_dim_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); int atg_nuclear_norm_out(tensor *, tensor out, tensor self, int keepdim); -int atg_nuclear_norm_out1(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); int atg_numpy_t(tensor *, tensor self); int atg_one_hot(tensor *, tensor self, int64_t num_classes); int atg_ones(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); int atg_ones_like(tensor *, tensor self); -int atg_ones_like1(tensor *, tensor self, int options_kind, int options_device); int atg_ones_out(tensor *, tensor out, int64_t *size_data, int size_len); int atg_orgqr(tensor *, tensor 
self, tensor input2); int atg_orgqr_out(tensor *, tensor out, tensor self, tensor input2); int atg_ormqr(tensor *, tensor self, tensor input2, tensor input3, int left, int transpose); int atg_ormqr_out(tensor *, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose); +int atg_outer(tensor *, tensor self, tensor vec2); +int atg_outer_out(tensor *, tensor out, tensor self, tensor vec2); +int atg_pad_sequence(tensor *, tensor *sequences_data, int sequences_len, int batch_first, double padding_value); int atg_pairwise_distance(tensor *, tensor x1, tensor x2, double p, double eps, int keepdim); int atg_pdist(tensor *, tensor self, double p); int atg_permute(tensor *, tensor self, int64_t *dims_data, int dims_len); -int atg_pin_memory(tensor *, tensor self); +int atg_pin_memory(tensor *, tensor self, int device); int atg_pinverse(tensor *, tensor self, double rcond); int atg_pixel_shuffle(tensor *, tensor self, int64_t upscale_factor); +int atg_pixel_unshuffle(tensor *, tensor self, int64_t downscale_factor); int atg_poisson(tensor *, tensor self); int atg_poisson_nll_loss(tensor *, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction); +int atg_polar(tensor *, tensor abs, tensor angle); +int atg_polar_out(tensor *, tensor out, tensor abs, tensor angle); int atg_polygamma(tensor *, int64_t n, tensor self); int atg_polygamma_(tensor *, tensor self, int64_t n); int atg_polygamma_out(tensor *, tensor out, int64_t n, tensor self); -int atg_pow(tensor *, tensor self, scalar exponent); -int atg_pow1(tensor *, tensor self, tensor exponent); -int atg_pow2(tensor *, scalar self, tensor exponent); +int atg_positive(tensor *, tensor self); +int atg_pow(tensor *, tensor self, tensor exponent); int atg_pow_(tensor *, tensor self, scalar exponent); -int atg_pow_1(tensor *, tensor self, tensor exponent); -int atg_pow_out(tensor *, tensor out, tensor self, scalar exponent); -int atg_pow_out1(tensor *, tensor out, tensor self, tensor exponent); -int atg_pow_out2(tensor *, tensor out, scalar self, tensor exponent); +int atg_pow_scalar(tensor *, scalar self, tensor exponent); +int atg_pow_scalar_out(tensor *, tensor out, scalar self, tensor exponent); +int atg_pow_tensor_(tensor *, tensor self, tensor exponent); +int atg_pow_tensor_scalar(tensor *, tensor self, scalar exponent); +int atg_pow_tensor_scalar_out(tensor *, tensor out, tensor self, scalar exponent); +int atg_pow_tensor_tensor_out(tensor *, tensor out, tensor self, tensor exponent); int atg_prelu(tensor *, tensor self, tensor weight); int atg_prelu_backward(tensor *, tensor grad_output, tensor self, tensor weight); int atg_prod(tensor *, tensor self, int dtype); -int atg_prod1(tensor *, tensor self, int64_t dim, int keepdim, int dtype); -int atg_prod_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim, int dtype); +int atg_prod_dim_int(tensor *, tensor self, int64_t dim, int keepdim, int dtype); +int atg_prod_int_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim, int dtype); +int atg_put(tensor *, tensor self, tensor index, tensor source, int accumulate); int atg_put_(tensor *, tensor self, tensor index, tensor source, int accumulate); int atg_q_per_channel_scales(tensor *, tensor self); int atg_q_per_channel_zero_points(tensor *, tensor self); int atg_qr(tensor *, tensor self, int some); -int atg_qr_out(tensor *, tensor Q, tensor R, tensor self, int some); +int atg_qr_q(tensor *, tensor Q, tensor R, tensor self, int some); +int atg_quantile(tensor *, tensor self, tensor 
q, int64_t dim, int keepdim); +int atg_quantile_new(tensor *, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation); +int atg_quantile_new_out(tensor *, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation); +int atg_quantile_new_scalar(tensor *, tensor self, double q, int64_t dim, int keepdim, char * interpolation); +int atg_quantile_new_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation); +int atg_quantile_out(tensor *, tensor out, tensor self, tensor q, int64_t dim, int keepdim); +int atg_quantile_scalar(tensor *, tensor self, double q, int64_t dim, int keepdim); +int atg_quantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t dim, int keepdim); int atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype); int atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype); -int atg_quantized_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -int atg_quantized_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); +int atg_quantize_per_tensor_tensor_qparams(tensor *, tensor self, tensor scale, tensor zero_point, int dtype); +// tensor *atg_quantize_per_tensor_tensors(tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype); +int atg_quantize_per_tensor_tensors(tensor *, tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype); +int atg_quantized_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point); int atg_quantized_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); -int atg_quantized_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic); -int atg_quantized_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic); int atg_quantized_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); +int atg_quantized_max_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_quantized_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); int atg_quantized_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, 
tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
 int atg_quantized_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
+int atg_rad2deg(tensor *, tensor self);
+int atg_rad2deg_(tensor *, tensor self);
+int atg_rad2deg_out(tensor *, tensor out, tensor self);
 int atg_rand(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
 int atg_rand_like(tensor *, tensor self);
-int atg_rand_like1(tensor *, tensor self, int options_kind, int options_device);
 int atg_rand_out(tensor *, tensor out, int64_t *size_data, int size_len);
 int atg_randint(tensor *, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_randint1(tensor *, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
 int atg_randint_like(tensor *, tensor self, int64_t high);
-int atg_randint_like1(tensor *, tensor self, int64_t low, int64_t high);
-int atg_randint_like2(tensor *, tensor self, int64_t high, int options_kind, int options_device);
-int atg_randint_like3(tensor *, tensor self, int64_t low, int64_t high, int options_kind, int options_device);
+int atg_randint_like_low_dtype(tensor *, tensor self, int64_t low, int64_t high);
+int atg_randint_low(tensor *, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
+int atg_randint_low_out(tensor *, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len);
 int atg_randint_out(tensor *, tensor out, int64_t high, int64_t *size_data, int size_len);
-int atg_randint_out1(tensor *, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len);
 int atg_randn(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
 int atg_randn_like(tensor *, tensor self);
-int atg_randn_like1(tensor *, tensor self, int options_kind, int options_device);
 int atg_randn_out(tensor *, tensor out, int64_t *size_data, int size_len);
 int atg_random_(tensor *, tensor self);
-int atg_random_1(tensor *, tensor self, int64_t to);
-int atg_random_2(tensor *, tensor self, int64_t from, int64_t to);
+int atg_random_from_(tensor *, tensor self, int64_t from, int64_t to);
+int atg_random_to_(tensor *, tensor self, int64_t to);
 int atg_randperm(tensor *, int64_t n, int options_kind, int options_device);
 int atg_randperm_out(tensor *, tensor out, int64_t n);
 int atg_range(tensor *, scalar start, scalar end, int options_kind, int options_device);
-int atg_range1(tensor *, scalar start, scalar end, int options_kind, int options_device);
 int atg_range_out(tensor *, tensor out, scalar start, scalar end);
+int atg_range_step(tensor *, scalar start, scalar end, int options_kind, int options_device);
+int atg_ravel(tensor *, tensor self);
 int atg_real(tensor *, tensor self);
-int atg_real_out(tensor *, tensor out, tensor self);
 int atg_reciprocal(tensor *, tensor self);
 int atg_reciprocal_(tensor *, tensor self);
 int atg_reciprocal_out(tensor *, tensor out, tensor self);
 int atg_reflection_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
 int atg_reflection_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad1d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
 int atg_reflection_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
 int atg_reflection_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
 int atg_reflection_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
 int atg_reflection_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad3d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad3d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_reflection_pad3d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
 int atg_relu(tensor *, tensor self);
+int atg_relu6(tensor *, tensor self);
+int atg_relu6_(tensor *, tensor self);
 int atg_relu_(tensor *, tensor self);
 int atg_remainder(tensor *, tensor self, scalar other);
-int atg_remainder1(tensor *, tensor self, tensor other);
 int atg_remainder_(tensor *, tensor self, scalar other);
-int atg_remainder_1(tensor *, tensor self, tensor other);
-int atg_remainder_out(tensor *, tensor out, tensor self, scalar other);
-int atg_remainder_out1(tensor *, tensor out, tensor self, tensor other);
+int atg_remainder_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_remainder_scalar_tensor(tensor *, scalar self, tensor other);
+int atg_remainder_tensor(tensor *, tensor self, tensor other);
+int atg_remainder_tensor_(tensor *, tensor self, tensor other);
+int atg_remainder_tensor_out(tensor *, tensor out, tensor self, tensor other);
 int atg_renorm(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
 int atg_renorm_(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
 int atg_renorm_out(tensor *, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm);
 int atg_repeat(tensor *, tensor self, int64_t *repeats_data, int repeats_len);
-int atg_repeat_interleave(tensor *, tensor repeats);
-int atg_repeat_interleave1(tensor *, tensor self, tensor repeats, int64_t dim);
-int atg_repeat_interleave2(tensor *, tensor self, int64_t repeats, int64_t dim);
+int atg_repeat_interleave(tensor *, tensor repeats, int64_t output_size);
+int atg_repeat_interleave_self_int(tensor *, tensor self, int64_t repeats, int64_t dim, int64_t output_size);
+int atg_repeat_interleave_self_tensor(tensor *, tensor self, tensor repeats, int64_t dim, int64_t output_size);
 int atg_replication_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_replication_pad1d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_replication_pad2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad3d(tensor *, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad3d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+int atg_replication_pad3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
 int atg_replication_pad3d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_requires_grad_(tensor *, tensor self, int _requires_grad);
+int atg_requires_grad_(tensor *, tensor self, int requires_grad);
 int atg_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len);
 int atg_reshape_as(tensor *, tensor self, tensor other);
 int atg_resize_(tensor *, tensor self, int64_t *size_data, int size_len);
 int atg_resize_as_(tensor *, tensor self, tensor the_template);
-int atg_rfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided);
+int atg_resize_as_sparse_(tensor *, tensor self, tensor the_template);
+int atg_resolve_conj(tensor *, tensor self);
+int atg_resolve_neg(tensor *, tensor self);
 int atg_rnn_relu(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_rnn_relu1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
 int atg_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
+int atg_rnn_relu_data(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
 int atg_rnn_tanh(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_rnn_tanh1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
 int atg_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
+int atg_rnn_tanh_data(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
 int atg_roll(tensor *, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len);
 int atg_rot90(tensor *, tensor self, int64_t k, int64_t *dims_data, int dims_len);
 int atg_round(tensor *, tensor self);
 int atg_round_(tensor *, tensor self);
 int atg_round_out(tensor *, tensor out, tensor self);
+int atg_row_stack(tensor *, tensor *tensors_data, int tensors_len);
+int atg_row_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
 int atg_rrelu(tensor *, tensor self, int training);
 int atg_rrelu_(tensor *, tensor self, int training);
 int atg_rrelu_with_noise(tensor *, tensor self, tensor noise, int training);
 int atg_rrelu_with_noise_(tensor *, tensor self, tensor noise, int training);
-int atg_rrelu_with_noise_backward(tensor *, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
-int atg_rrelu_with_noise_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
+int atg_rrelu_with_noise_backward(tensor *, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training, int self_is_result);
 int atg_rrelu_with_noise_out(tensor *, tensor out, tensor self, tensor noise, int training);
 int atg_rsqrt(tensor *, tensor self);
 int atg_rsqrt_(tensor *, tensor self);
 int atg_rsqrt_out(tensor *, tensor out, tensor self);
 int atg_rsub(tensor *, tensor self, tensor other);
-int atg_rsub1(tensor *, tensor self, scalar other);
+int atg_rsub_scalar(tensor *, tensor self, scalar other);
 int atg_scalar_tensor(tensor *, scalar s, int options_kind, int options_device);
 int atg_scatter(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_scatter1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
 int atg_scatter_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_scatter_1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
 int atg_scatter_add(tensor *, tensor self, int64_t dim, tensor index, tensor src);
 int atg_scatter_add_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
+int atg_scatter_add_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, tensor src);
+int atg_scatter_reduce(tensor *, tensor self, int64_t dim, tensor index, tensor src, char * reduce);
+int atg_scatter_reduce_(tensor *, tensor self, int64_t dim, tensor index, tensor src, char * reduce);
+int atg_scatter_reduce_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, tensor src, char * reduce);
+int atg_scatter_src_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, tensor src);
+int atg_scatter_value(tensor *, tensor self, int64_t dim, tensor index, scalar value);
+int atg_scatter_value_(tensor *, tensor self, int64_t dim, tensor index, scalar value);
+int atg_scatter_value_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, scalar value);
+int atg_scatter_value_reduce(tensor *, tensor self, int64_t dim, tensor index, scalar value, char * reduce);
+int atg_scatter_value_reduce_(tensor *, tensor self, int64_t dim, tensor index, scalar value, char * reduce);
+int atg_scatter_value_reduce_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, scalar value, char * reduce);
+int atg_searchsorted(tensor *, tensor sorted_sequence, tensor self, int out_int32, int right);
+int atg_searchsorted_scalar(tensor *, tensor sorted_sequence, scalar self, int out_int32, int right);
+int atg_searchsorted_tensor_out(tensor *, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right);
+int atg_segment_reduce(tensor *, tensor data, char * reduce, tensor lengths, tensor indices, int64_t axis, int unsafe, scalar initial);
 int atg_select(tensor *, tensor self, int64_t dim, int64_t index);
+int atg_select_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index);
 int atg_selu(tensor *, tensor self);
 int atg_selu_(tensor *, tensor self);
 int atg_set_(tensor *, tensor self);
-int atg_set_1(tensor *, tensor self, tensor source);
 int atg_set_requires_grad(tensor *, tensor self, int r);
+int atg_set_source_tensor_(tensor *, tensor self, tensor source);
+int atg_sgn(tensor *, tensor self);
+int atg_sgn_(tensor *, tensor self);
+int atg_sgn_out(tensor *, tensor out, tensor self);
 int atg_sigmoid(tensor *, tensor self);
 int atg_sigmoid_(tensor *, tensor self);
 int atg_sigmoid_backward(tensor *, tensor grad_output, tensor output);
-int atg_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
+int atg_sigmoid_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor output);
 int atg_sigmoid_out(tensor *, tensor out, tensor self);
 int atg_sign(tensor *, tensor self);
 int atg_sign_(tensor *, tensor self);
 int atg_sign_out(tensor *, tensor out, tensor self);
+int atg_signbit(tensor *, tensor self);
+int atg_signbit_out(tensor *, tensor out, tensor self);
+int atg_silu(tensor *, tensor self);
+int atg_silu_(tensor *, tensor self);
+int atg_silu_backward(tensor *, tensor grad_output, tensor self);
+int atg_silu_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self);
+int atg_silu_out(tensor *, tensor out, tensor self);
 int atg_sin(tensor *, tensor self);
 int atg_sin_(tensor *, tensor self);
 int atg_sin_out(tensor *, tensor out, tensor self);
+int atg_sinc(tensor *, tensor self);
+int atg_sinc_(tensor *, tensor self);
+int atg_sinc_out(tensor *, tensor out, tensor self);
 int atg_sinh(tensor *, tensor self);
 int atg_sinh_(tensor *, tensor self);
 int atg_sinh_out(tensor *, tensor out, tensor self);
 int atg_slice(tensor *, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step);
+int atg_slice_backward(tensor *, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step);
 int atg_slogdet(tensor *, tensor self);
 int atg_slow_conv3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
 int atg_slow_conv3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
@@ -809,33 +1529,108 @@ int atg_slow_conv_transpose2d_out(tensor *, tensor out, tensor self, tensor weig
 int atg_slow_conv_transpose3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
 int atg_slow_conv_transpose3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
 int atg_smm(tensor *, tensor self, tensor mat2);
-int atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
+int atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction, double beta);
+int atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta);
+int atg_smooth_l1_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta);
+int atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction, double beta);
 int atg_soft_margin_loss(tensor *, tensor self, tensor target, int64_t reduction);
 int atg_soft_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_soft_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
+int atg_soft_margin_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
 int atg_soft_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
 int atg_softmax(tensor *, tensor self, int64_t dim, int dtype);
 int atg_softplus(tensor *, tensor self);
 int atg_softplus_backward(tensor *, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
-int atg_softplus_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
+int atg_softplus_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
 int atg_softplus_out(tensor *, tensor out, tensor self);
 int atg_softshrink(tensor *, tensor self);
 int atg_softshrink_backward(tensor *, tensor grad_output, tensor self, scalar lambd);
-int atg_softshrink_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar lambd);
+int atg_softshrink_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar lambd);
 int atg_softshrink_out(tensor *, tensor out, tensor self);
 int atg_solve(tensor *, tensor self, tensor A);
-int atg_solve_out(tensor *, tensor solution, tensor lu, tensor self, tensor A);
+int atg_solve_solution(tensor *, tensor solution, tensor lu, tensor self, tensor A);
 int atg_sort(tensor *, tensor self, int64_t dim, int descending);
-int atg_sort_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int descending);
+int atg_sort_stable(tensor *, tensor self, int stable, int64_t dim, int descending);
+int atg_sort_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int descending);
+int atg_sort_values_stable(tensor *, tensor values, tensor indices, tensor self, int stable, int64_t dim, int descending);
 int atg_sparse_coo_tensor(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_sparse_coo_tensor1(tensor *, tensor indices, tensor values, int options_kind, int options_device);
-int atg_sparse_coo_tensor2(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device);
+int atg_sparse_coo_tensor_indices(tensor *, tensor indices, tensor values, int options_kind, int options_device);
+int atg_sparse_coo_tensor_indices_size(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device);
+int atg_sparse_csr_tensor(tensor *, tensor crow_indices, tensor col_indices, tensor values, int options_kind, int options_device);
+int atg_sparse_csr_tensor_crow_col_value_size(tensor *, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device);
 int atg_sparse_mask(tensor *, tensor self, tensor mask);
 int atg_sparse_resize_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
 int atg_sparse_resize_and_clear_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
+int atg_special_digamma(tensor *, tensor self);
+int atg_special_digamma_out(tensor *, tensor out, tensor self);
+int atg_special_entr(tensor *, tensor self);
+int atg_special_entr_out(tensor *, tensor out, tensor self);
+int atg_special_erf(tensor *, tensor self);
+int atg_special_erf_out(tensor *, tensor out, tensor self);
+int atg_special_erfc(tensor *, tensor self);
+int atg_special_erfc_out(tensor *, tensor out, tensor self);
+int atg_special_erfcx(tensor *, tensor self);
+int atg_special_erfcx_out(tensor *, tensor out, tensor self);
+int atg_special_erfinv(tensor *, tensor self);
+int atg_special_erfinv_out(tensor *, tensor out, tensor self);
+int atg_special_exp2(tensor *, tensor self);
+int atg_special_exp2_out(tensor *, tensor out, tensor self);
+int atg_special_expit(tensor *, tensor self);
+int atg_special_expit_out(tensor *, tensor out, tensor self);
+int atg_special_expm1(tensor *, tensor self);
+int atg_special_expm1_out(tensor *, tensor out, tensor self);
+int atg_special_gammainc(tensor *, tensor self, tensor other);
+int atg_special_gammainc_out(tensor *, tensor out, tensor self, tensor other);
+int atg_special_gammaincc(tensor *, tensor self, tensor other);
+int atg_special_gammaincc_out(tensor *, tensor out, tensor self, tensor other);
+int atg_special_gammaln(tensor *, tensor self);
+int atg_special_gammaln_out(tensor *, tensor out, tensor self);
+int atg_special_i0(tensor *, tensor self);
+int atg_special_i0_out(tensor *, tensor out, tensor self);
+int atg_special_i0e(tensor *, tensor self);
+int atg_special_i0e_out(tensor *, tensor out, tensor self);
+int atg_special_i1(tensor *, tensor self);
+int atg_special_i1_out(tensor *, tensor out, tensor self);
+int atg_special_i1e(tensor *, tensor self);
+int atg_special_i1e_out(tensor *, tensor out, tensor self);
+int atg_special_log1p(tensor *, tensor self);
+int atg_special_log1p_out(tensor *, tensor out, tensor self);
+int atg_special_log_softmax(tensor *, tensor self, int64_t dim, int dtype);
+int atg_special_logit(tensor *, tensor self, double eps);
+int atg_special_logit_out(tensor *, tensor out, tensor self, double eps);
+int atg_special_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_special_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+int atg_special_multigammaln(tensor *, tensor self, int64_t p);
+int atg_special_multigammaln_out(tensor *, tensor out, tensor self, int64_t p);
+int atg_special_ndtr(tensor *, tensor self);
+int atg_special_ndtr_out(tensor *, tensor out, tensor self);
+int atg_special_ndtri(tensor *, tensor self);
+int atg_special_ndtri_out(tensor *, tensor out, tensor self);
+int atg_special_polygamma(tensor *, int64_t n, tensor self);
+int atg_special_polygamma_out(tensor *, tensor out, int64_t n, tensor self);
+int atg_special_psi(tensor *, tensor self);
+int atg_special_psi_out(tensor *, tensor out, tensor self);
+int atg_special_round(tensor *, tensor self);
+int atg_special_round_out(tensor *, tensor out, tensor self);
+int atg_special_sinc(tensor *, tensor self);
+int atg_special_sinc_out(tensor *, tensor out, tensor self);
+int atg_special_xlog1py(tensor *, tensor self, tensor other);
+int atg_special_xlog1py_other_scalar(tensor *, tensor self, scalar other);
+int atg_special_xlog1py_other_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_special_xlog1py_out(tensor *, tensor out, tensor self, tensor other);
+int atg_special_xlog1py_self_scalar(tensor *, scalar self, tensor other);
+int atg_special_xlog1py_self_scalar_out(tensor *, tensor out, scalar self, tensor other);
+int atg_special_xlogy(tensor *, tensor self, tensor other);
+int atg_special_xlogy_other_scalar(tensor *, tensor self, scalar other);
+int atg_special_xlogy_other_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_special_xlogy_out(tensor *, tensor out, tensor self, tensor other);
+int atg_special_xlogy_self_scalar(tensor *, scalar self, tensor other);
+int atg_special_xlogy_self_scalar_out(tensor *, tensor out, scalar self, tensor other);
+int atg_special_zeta(tensor *, tensor self, tensor other);
+int atg_special_zeta_other_scalar(tensor *, tensor self, scalar other);
+int atg_special_zeta_other_scalar_out(tensor *, tensor out, tensor self, scalar other);
+int atg_special_zeta_out(tensor *, tensor out, tensor self, tensor other);
+int atg_special_zeta_self_scalar(tensor *, scalar self, tensor other);
+int atg_special_zeta_self_scalar_out(tensor *, tensor out, scalar self, tensor other);
 // tensor *atg_split(tensor self, int64_t split_size, int64_t dim);
 int atg_split(tensor *, tensor self, int64_t split_size, int64_t dim);
 // tensor *atg_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
@@ -843,36 +1638,53 @@ int atg_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int s
 int atg_sqrt(tensor *, tensor self);
 int atg_sqrt_(tensor *, tensor self);
 int atg_sqrt_out(tensor *, tensor out, tensor self);
+int atg_square(tensor *, tensor self);
+int atg_square_(tensor *, tensor self);
+int atg_square_out(tensor *, tensor out, tensor self);
 int atg_squeeze(tensor *, tensor self);
-int atg_squeeze1(tensor *, tensor self, int64_t dim);
 int atg_squeeze_(tensor *, tensor self);
-int atg_squeeze_1(tensor *, tensor self, int64_t dim);
+int atg_squeeze_dim(tensor *, tensor self, int64_t dim);
+int atg_squeeze_dim_(tensor *, tensor self, int64_t dim);
 int atg_sspaddmm(tensor *, tensor self, tensor mat1, tensor mat2);
 int atg_sspaddmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2);
 int atg_stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim);
 int atg_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim);
 int atg_std(tensor *, tensor self, int unbiased);
-int atg_std1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+int atg_std_correction(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_std_correction_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_std_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 int atg_std_mean(tensor *, tensor self, int unbiased);
-int atg_std_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+int atg_std_mean_correction(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_std_mean_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 int atg_std_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
-int atg_stft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided);
+int atg_stft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided, int return_complex);
 int atg_sub(tensor *, tensor self, tensor other);
-int atg_sub1(tensor *, tensor self, scalar other);
 int atg_sub_(tensor *, tensor self, tensor other);
-int atg_sub_1(tensor *, tensor self, scalar other);
 int atg_sub_out(tensor *, tensor out, tensor self, tensor other);
+int atg_sub_scalar(tensor *, tensor self, scalar other);
+int atg_sub_scalar_(tensor *, tensor self, scalar other);
+int atg_subtract(tensor *, tensor self, tensor other);
+int atg_subtract_(tensor *, tensor self, tensor other);
+int atg_subtract_out(tensor *, tensor out, tensor self, tensor other);
+int atg_subtract_scalar(tensor *, tensor self, scalar other);
+int atg_subtract_scalar_(tensor *, tensor self, scalar other);
 int atg_sum(tensor *, tensor self, int dtype);
-int atg_sum1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
-int atg_sum_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+int atg_sum_dim_intlist(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+int atg_sum_intlist_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
 int atg_sum_to_size(tensor *, tensor self, int64_t *size_data, int size_len);
 int atg_svd(tensor *, tensor self, int some, int compute_uv);
-int atg_svd_out(tensor *, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv);
+int atg_svd_u(tensor *, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv);
+int atg_swapaxes(tensor *, tensor self, int64_t axis0, int64_t axis1);
+int atg_swapaxes_(tensor *, tensor self, int64_t axis0, int64_t axis1);
+int atg_swapdims(tensor *, tensor self, int64_t dim0, int64_t dim1);
+int atg_swapdims_(tensor *, tensor self, int64_t dim0, int64_t dim1);
 int atg_symeig(tensor *, tensor self, int eigenvectors, int upper);
-int atg_symeig_out(tensor *, tensor e, tensor V, tensor self, int eigenvectors, int upper);
+int atg_symeig_e(tensor *, tensor e, tensor V, tensor self, int eigenvectors, int upper);
 int atg_t(tensor *, tensor self);
 int atg_t_(tensor *, tensor self);
 int atg_take(tensor *, tensor self, tensor index);
+int atg_take_along_dim(tensor *, tensor self, tensor indices, int64_t dim);
+int atg_take_along_dim_out(tensor *, tensor out, tensor self, tensor indices, int64_t dim);
 int atg_take_out(tensor *, tensor out, tensor self, tensor index);
 int atg_tan(tensor *, tensor self);
 int atg_tan_(tensor *, tensor self);
@@ -880,34 +1692,46 @@ int atg_tan_out(tensor *, tensor out, tensor self);
 int atg_tanh(tensor *, tensor self);
 int atg_tanh_(tensor *, tensor self);
 int atg_tanh_backward(tensor *, tensor grad_output, tensor output);
-int atg_tanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
+int atg_tanh_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor output);
 int atg_tanh_out(tensor *, tensor out, tensor self);
+// tensor *atg_tensor_split(tensor self, int64_t sections, int64_t dim);
+int atg_tensor_split(tensor *, tensor self, int64_t sections, int64_t dim);
+// tensor *atg_tensor_split_indices(tensor self, int64_t *indices_data, int indices_len, int64_t dim);
+int atg_tensor_split_indices(tensor *, tensor self, int64_t *indices_data, int indices_len, int64_t dim);
+// tensor *atg_tensor_split_tensor_indices_or_sections(tensor self, tensor tensor_indices_or_sections, int64_t dim);
+int atg_tensor_split_tensor_indices_or_sections(tensor *, tensor self, tensor tensor_indices_or_sections, int64_t dim);
 int atg_tensordot(tensor *, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
+int atg_tensordot_out(tensor *, tensor out, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
 int atg_threshold(tensor *, tensor self, scalar threshold, scalar value);
 int atg_threshold_(tensor *, tensor self, scalar threshold, scalar value);
 int atg_threshold_backward(tensor *, tensor grad_output, tensor self, scalar threshold);
+int atg_threshold_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar threshold);
 int atg_threshold_out(tensor *, tensor out, tensor self, scalar threshold, scalar value);
+int atg_tile(tensor *, tensor self, int64_t *dims_data, int dims_len);
 int atg_to(tensor *, tensor self, int device);
-int atg_to1(tensor *, tensor self, int options_kind, int options_device, int non_blocking, int copy);
-int atg_to2(tensor *, tensor self, int dtype, int non_blocking, int copy);
-int atg_to3(tensor *, tensor self, tensor other, int non_blocking, int copy);
-int atg_to4(tensor *, tensor self, int device, int dtype, int non_blocking, int copy);
-int atg_to_dense(tensor *, tensor self);
+int atg_to_dense(tensor *, tensor self, int dtype);
 int atg_to_dense_backward(tensor *, tensor grad, tensor input);
-int atg_to_mkldnn(tensor *, tensor self);
+int atg_to_device(tensor *, tensor self, int device, int dtype, int non_blocking, int copy);
+int atg_to_dtype(tensor *, tensor self, int dtype, int non_blocking, int copy);
+int atg_to_dtype_layout(tensor *, tensor self, int options_kind, int options_device, int non_blocking, int copy);
+int atg_to_mkldnn(tensor *, tensor self, int dtype);
 int atg_to_mkldnn_backward(tensor *, tensor grad, tensor input);
+int atg_to_other(tensor *, tensor self, tensor other, int non_blocking, int copy);
 int atg_to_sparse(tensor *, tensor self);
-int atg_to_sparse1(tensor *, tensor self, int64_t sparse_dim);
+int atg_to_sparse_sparse_dim(tensor *, tensor self, int64_t sparse_dim);
 int atg_topk(tensor *, tensor self, int64_t k, int64_t dim, int largest, int sorted);
-int atg_topk_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted);
+int atg_topk_values(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted);
 int atg_totype(tensor *, tensor self, int scalar_type);
 int atg_trace(tensor *, tensor self);
+int atg_trace_backward(tensor *, tensor grad, int64_t *sizes_data, int sizes_len);
 int atg_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1);
 int atg_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1);
+int atg_trapezoid(tensor *, tensor y, int64_t dim);
+int atg_trapezoid_x(tensor *, tensor y, tensor x, int64_t dim);
 int atg_trapz(tensor *, tensor y, tensor x, int64_t dim);
-int atg_trapz1(tensor *, tensor y, double dx, int64_t dim);
+int atg_trapz_dx(tensor *, tensor y, double dx, int64_t dim);
 int atg_triangular_solve(tensor *, tensor self, tensor A, int upper, int transpose, int unitriangular);
-int atg_triangular_solve_out(tensor *, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular);
+int atg_triangular_solve_x(tensor *, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular);
 int atg_tril(tensor *, tensor self, int64_t diagonal);
 int atg_tril_(tensor *, tensor self, int64_t diagonal);
 int atg_tril_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device);
@@ -917,60 +1741,101 @@ int atg_triu(tensor *, tensor self, int64_t diagonal);
 int atg_triu_(tensor *, tensor self, int64_t diagonal);
 int atg_triu_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device);
 int atg_triu_out(tensor *, tensor out, tensor self, int64_t diagonal);
+int atg_true_divide(tensor *, tensor self, tensor other);
+int atg_true_divide_(tensor *, tensor self, tensor other);
+int atg_true_divide_out(tensor *, tensor out, tensor self, tensor other);
+int atg_true_divide_scalar(tensor *, tensor self, scalar other);
+int atg_true_divide_scalar_(tensor *, tensor self, scalar other);
 int atg_trunc(tensor *, tensor self);
 int atg_trunc_(tensor *, tensor self);
 int atg_trunc_out(tensor *, tensor out, tensor self);
 int atg_type_as(tensor *, tensor self, tensor other);
 // tensor *atg_unbind(tensor self, int64_t dim);
 int atg_unbind(tensor *, tensor self, int64_t dim);
+int atg_unflatten(tensor *, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len);
+// tensor *atg_unflatten_dense_tensors(tensor flat, tensor *tensors_data, int tensors_len);
+int atg_unflatten_dense_tensors(tensor *, tensor flat, tensor *tensors_data, int tensors_len);
 int atg_unfold(tensor *, tensor self, int64_t dimension, int64_t size, int64_t step);
+int atg_unfold_backward(tensor *, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step);
 int atg_uniform_(tensor *, tensor self, double from, double to);
 int atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim);
 int atg_unique_dim(tensor *, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts);
 int atg_unique_dim_consecutive(tensor *, tensor self, int64_t dim, int return_inverse, int return_counts);
+// tensor *atg_unsafe_chunk(tensor self, int64_t chunks, int64_t dim);
+int atg_unsafe_chunk(tensor *, tensor self, int64_t chunks, int64_t dim);
+// tensor *atg_unsafe_split(tensor self, int64_t split_size, int64_t dim);
+int atg_unsafe_split(tensor *, tensor self, int64_t split_size, int64_t dim);
+// tensor *atg_unsafe_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
+int atg_unsafe_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
 int atg_unsqueeze(tensor *, tensor self, int64_t dim);
 int atg_unsqueeze_(tensor *, tensor self, int64_t dim);
-int atg_upsample_bicubic2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_bicubic2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_bilinear2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_linear1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len);
-int atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len);
-int atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
-int atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_trilinear3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners);
-int atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners);
+int atg_upsample_bicubic2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bicubic2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bilinear2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w);
+int atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales);
+int atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales);
+int atg_upsample_linear1d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales);
+int atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales);
+int atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales);
+int atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales);
+int atg_upsample_nearest1d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales);
+int atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales);
+int atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w);
+int atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w);
+int atg_upsample_nearest2d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w);
+int atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w);
+int atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w);
+int atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w);
+int atg_upsample_nearest3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w);
+int atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w);
+int atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w);
+int atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w);
+int atg_upsample_trilinear3d_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w);
+int atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w);
+int atg_value_selecting_reduction_backward(tensor *, tensor grad, int64_t dim, tensor indices, int64_t *sizes_data, int sizes_len, int keepdim);
 int atg_values(tensor *, tensor self);
+int atg_vander(tensor *, tensor x, int64_t n, int increasing);
 int atg_var(tensor *, tensor self, int unbiased);
-int atg_var1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+int atg_var_correction(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_var_correction_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_var_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 int atg_var_mean(tensor *, tensor self, int unbiased);
-int atg_var_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+int atg_var_mean_correction(tensor *, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim);
+int atg_var_mean_dim(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 int atg_var_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+int atg_vdot(tensor *, tensor self, tensor other);
+int atg_vdot_out(tensor *, tensor out, tensor self, tensor other);
 int atg_view(tensor *, tensor self, int64_t *size_data, int size_len);
 int atg_view_as(tensor *, tensor self, tensor other);
+int atg_view_as_complex(tensor *, tensor self);
+int atg_view_as_real(tensor *, tensor self);
+int atg_view_dtype(tensor *, tensor self, int dtype);
+// tensor *atg_vsplit(tensor self, int64_t sections);
+int atg_vsplit(tensor *, tensor self, int64_t sections);
+// tensor *atg_vsplit_array(tensor self, int64_t *indices_data, int indices_len);
+int atg_vsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
+int atg_vstack(tensor *, tensor *tensors_data, int tensors_len);
+int atg_vstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
 // tensor *atg_where(tensor condition);
 int atg_where(tensor *, tensor condition);
-int atg_where1(tensor *, tensor condition, tensor self, tensor other);
+int atg_where_scalar(tensor *, tensor condition, scalar self, scalar other);
+int atg_where_scalarother(tensor *, tensor condition, tensor self, scalar other);
+int atg_where_scalarself(tensor *, tensor condition, scalar self, tensor other);
+int atg_where_self(tensor *, tensor condition, tensor self, tensor other);
+int atg_xlogy(tensor *, tensor self, tensor other);
+int atg_xlogy_(tensor *, tensor self, tensor other);
+int atg_xlogy_outscalar_other(tensor *, tensor out, tensor self, scalar other);
+int atg_xlogy_outscalar_self(tensor *, tensor out, scalar self, tensor other);
+int atg_xlogy_outtensor(tensor *, tensor out, tensor self, tensor other);
+int atg_xlogy_scalar_other(tensor *, tensor self, scalar other);
+int atg_xlogy_scalar_other_(tensor *, tensor self, scalar other);
+int atg_xlogy_scalar_self(tensor *, scalar self, tensor other);
 int atg_zero_(tensor *, tensor self);
 int atg_zeros(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
 int atg_zeros_like(tensor *, tensor self);
-int atg_zeros_like1(tensor *, tensor self, int options_kind, int options_device);
+int atg_zeros_out(tensor *, tensor out, int64_t *size_data, int size_len);
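Every binding in the generated header above follows one calling convention: the first argument is a caller-allocated output array that receives the resulting tensor handle(s), and the int return value is a status code (0 on success, non-zero on failure). Below is a minimal caller sketch, not part of the patch series; the Kind/Device integer encodings used in the comments are assumptions about how the wrapper maps them.

```
/* Hedged sketch: drive two generated bindings through the out-array
 * protocol. atg_rand writes one tensor handle into out[0]; atg_relu
 * consumes it and produces a new handle, freed with at_free. */
#include "torch_api.h"

int example(void) {
  int64_t size[2] = {2, 3};
  tensor rand_out[1];
  /* options_kind 6 = single-precision float, options_device -1 = CPU
   * (assumed encodings) */
  if (atg_rand(rand_out, size, 2, 6, -1) != 0) return 1;
  tensor relu_out[1];
  if (atg_relu(relu_out, rand_out[0]) != 0) { at_free(rand_out[0]); return 1; }
  at_print(relu_out[0]);
  at_free(relu_out[0]);
  at_free(rand_out[0]);
  return 0;
}
```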
From b24c65a6fca4eb90c0a55156c25b1acf024518fa Mon Sep 17 00:00:00 2001
From: Jesper Stemann Andersen 
Date: Thu, 16 Nov 2023 01:57:34 +0100
Subject: [PATCH 08/12] Changed return type of functions to status code

A. Non-void, non-* methods. Search/replace:
1. torch_api.h: ^(?!void)(\w+) (at.+)\( -> int $2($1 *,
2. torch_api.h: , \);$ -> );
3. torch_api.cpp: ^(?!void)(\w+) (at.+)\( -> int $2($1 *out__,
4. torch_api.cpp: , \) \{$ -> ) {

B. void-methods. Search/replace
1. torch_api.{h,cpp}: ^void (at.+)\( -> int $1(

C. *-methods. Search/replace
1. torch_api.h: ^(\w+ \*)(at.+)\( -> int $2($1*,
2. torch_api.cpp: ^(\w+ \*)(at.+)\( -> int $2($1*out__,

D. Implemented return status code

Replaced
```
^(\s*) return new (.+)
\s*\)
\s*return nullptr;
```
with
```
$1 out__[0] = new $2
$1 return 0;
$1)
$1return 1;
```

E. Implemented return status code

1. Replaced
```
^(\s*)PROTECT\(return new (.+)\)
\s*return nullptr;
```
with
```
$1PROTECT(
$1 out__[0] = new $2
$1 return 0;
$1)
$1return 1;
```

F. Implemented return status code

1. Replaced
```
^(\s*)PROTECT\(return (.+)\)
```
with
```
$1PROTECT(
$1 out__[0] = $2
$1 return 0;
$1)
```

G. Implemented return status code

1. Replaced
```
^(\s*) return (.+)
\s*\)
\s*return nullptr;
```
with
```
$1 out__[0] = $2
$1 return 0;
$1)
$1return 1;
```

H. Restored error handling

Handled caml_failwith by search/replace:

Replaced:
```
^(\s*) caml_failwith\((.+)
```
with:
```
{
$1 myerr = strdup($2
$1 return 1;
$1}
```

I. Replaced
```
^(\s+)PROTECT\( return (.+) \)
```
with
```
$1PROTECT(
$1 out__[0] = $2
$1 return 0;
$1)
```

J. Manually implemented return status code

K. Changed return code from -1 to 1 to reduce diff

L. Fixed a couple of warnings
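The caller-side effect of rules A-L, sketched below (not part of the patch): failure used to be signalled in-band through nullptr or -1, and is now a uniform status code, with the message retrieved separately through get_last_error/flush_error, both introduced in the diff that follows.

```
/* Hedged sketch of the contract after this patch: every entry point
 * returns 0 on success and 1 on failure; the stored message is copied
 * out with get_last_error and cleared with flush_error. The 256-byte
 * buffer size is the caller's choice; the wrapper copies without a
 * length check. */
#include <stdio.h>
#include "torch_api.h"

void check_example(void) {
  tensor t;
  if (at_new_tensor(&t) != 0) {
    char err[256];
    get_last_error(err);
    flush_error();
    fprintf(stderr, "torch error: %s\n", err);
    return;
  }
  at_free(t);
}
```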
---
 deps/c_wrapper/torch_api.cpp | 819 +++++++++++++++++++++++------------
 deps/c_wrapper/torch_api.h   | 224 +++++-----
 2 files changed, 668 insertions(+), 375 deletions(-)

diff --git a/deps/c_wrapper/torch_api.cpp b/deps/c_wrapper/torch_api.cpp
index 84f6ccc5..f1b82f2d 100644
--- a/deps/c_wrapper/torch_api.cpp
+++ b/deps/c_wrapper/torch_api.cpp
@@ -3,13 +3,33 @@
 #include
 #include
 #include
-#include
 #include "torch_api.h"
+#define caml_invalid_argument printf
+
 using namespace std;
 
-void at_manual_seed(int64_t seed) {
-  torch::manual_seed(seed);
+int get_last_error(char *err) {
+  int len = strlen(myerr);
+  for (int i = 0; i < len; ++i) err[i] = myerr[i];
+  err[len] = '\0';
+  return 0;
+}
+
+int flush_error() {
+  PROTECT(
+    myerr = "";
+    return 0;
+  )
+  return 1;
+}
+
+int at_manual_seed(int64_t seed) {
+  PROTECT(
+    torch::manual_seed(seed);
+    return 0;
+  )
+  return 1;
 }
 
 vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs, int len) {
@@ -31,31 +51,39 @@ at::Device device_of_int(int d) {
   return at::Device(at::kCUDA, /*index=*/d);
 }
 
-tensor at_new_tensor() {
+int at_new_tensor(tensor *out__) {
   PROTECT(
-    return new torch::Tensor();
+    out__[0] = new torch::Tensor();
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-tensor at_tensor_of_data(void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) {
+int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) {
   PROTECT(
     torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type));
-    if (element_size_in_bytes != tensor.element_size())
-      caml_failwith("incoherent element sizes in bytes");
+    if (element_size_in_bytes != tensor.element_size()) {
+      myerr = strdup("incoherent element sizes in bytes");
+      return 1;
+    }
     void *tensor_data = tensor.data_ptr();
     memcpy(tensor_data, vs, tensor.numel() * element_size_in_bytes);
-    return new torch::Tensor(tensor);
+    out__[0] = new torch::Tensor(tensor);
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-void at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) {
+int at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) {
   PROTECT(
-    if (elt_size_in_bytes != tensor->element_size())
-      caml_failwith("incoherent element sizes in bytes");
-    if (numel != tensor->numel())
-      caml_failwith("incoherent number of elements");
+    if (elt_size_in_bytes != tensor->element_size()) {
+      myerr = strdup("incoherent element sizes in bytes");
+      return 1;
+    }
+    if (numel != tensor->numel()) {
+      myerr = strdup("incoherent number of elements");
+      return 1;
+    }
     if (tensor->device().type() != at::kCPU) {
       torch::Tensor tmp_tensor = tensor->to(at::kCPU).contiguous();
       void *tensor_data = tmp_tensor.data_ptr();
@@ -66,125 +94,164 @@ void at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes)
       void *tensor_data = tmp_tensor.data_ptr();
       memcpy(vs, tensor_data, numel * elt_size_in_bytes);
     }
+    return 0;
   )
+  return 1;
 }
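As exercised below in a hedged sketch, at_tensor_of_data and at_copy_data now report size mismatches through the status code instead of caml_failwith; the scalar-type integer 7 for double is an assumption about the Kind encoding used by the bindings.

```
/* Round-trip a host buffer through the converted entry points above:
 * build a 2x3 double tensor, then copy its contents back out. */
#include <stdio.h>
#include "torch_api.h"

int roundtrip(void) {
  double in[6] = {1, 2, 3, 4, 5, 6};
  double out[6] = {0};
  int64_t dims[2] = {2, 3};
  tensor t;
  if (at_tensor_of_data(&t, in, dims, 2, sizeof(double), 7) != 0) return 1;
  if (at_copy_data(t, out, 6, sizeof(double)) != 0) { at_free(t); return 1; }
  printf("%f\n", out[5]);  /* prints 6.000000 */
  at_free(t);
  return 0;
}
```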
-tensor at_float_vec(double *vs, int len, int type) {
+int at_float_vec(tensor *out__, double *vs, int len, int type) {
   PROTECT(
     torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type));
     for (int i = 0; i < len; ++i) tensor[i] = vs[i];
-    return new torch::Tensor(tensor);
+    out__[0] = new torch::Tensor(tensor);
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-tensor at_int_vec(int64_t *vs, int len, int type) {
+int at_int_vec(tensor *out__, int64_t *vs, int len, int type) {
   PROTECT(
     torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type));
     for (int i = 0; i < len; ++i) tensor[i] = vs[i];
-    return new torch::Tensor(tensor);
+    out__[0] = new torch::Tensor(tensor);
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-int at_defined(tensor t) {
-  PROTECT(return t->defined();)
-  return -1;
+int at_defined(int *out__, tensor t) {
+  PROTECT(
+    out__[0] = t->defined();
+    return 0;
+  )
+  return 1;
 }
 
-int at_is_sparse(tensor t) {
-  PROTECT(return t->is_sparse();)
-  return -1;
+int at_is_sparse(int *out__, tensor t) {
+  PROTECT(
+    out__[0] = t->is_sparse();
+    return 0;
+  )
+  return 1;
 }
 
-int at_dim(tensor t) {
-  PROTECT(return t->dim();)
-  return -1;
+int at_dim(int *out__, tensor t) {
+  PROTECT(
+    out__[0] = t->dim();
+    return 0;
+  )
+  return 1;
 }
 
-void at_shape(tensor t, int *dims) {
+int at_shape(tensor t, int *dims) {
   PROTECT(
     int i = 0;
     for (int dim : t->sizes()) dims[i++] = dim;
+    return 0;
   )
+  return 1;
 }
 
-void at_stride(tensor t, int64_t *dims) {
+int at_stride(tensor t, int64_t *dims) {
   PROTECT(
     int i = 0;
     for (int64_t dim: t->strides()) dims[i++] = dim;
+    return 0;
   )
+  return 1;
 }
 
-int at_scalar_type(tensor t) {
+int at_scalar_type(int *out__, tensor t) {
   PROTECT(
-    return static_cast<int>(t->scalar_type());
+    out__[0] = static_cast<int>(t->scalar_type());
+    return 0;
   )
+  return 1;
 }
 
-void at_autocast_clear_cache() {
-  at::autocast::clear_cache();
+int at_autocast_clear_cache() {
+  PROTECT(
+    at::autocast::clear_cache();
+    return 0;
+  )
+  return 1;
 }
 
-int at_autocast_decrement_nesting() {
+int at_autocast_decrement_nesting(int *out__) {
   PROTECT(
-    return at::autocast::decrement_nesting();
+    out__[0] = at::autocast::decrement_nesting();
+    return 0;
  )
-  return -1;
+  return 1;
 }
 
-int at_autocast_increment_nesting() {
+int at_autocast_increment_nesting(int *out__) {
   PROTECT(
-    return at::autocast::increment_nesting();
+    out__[0] = at::autocast::increment_nesting();
+    return 0;
   )
-  return -1;
+  return 1;
 }
 
-int at_autocast_is_enabled() {
+int at_autocast_is_enabled(int *out__) {
   PROTECT(
-    return at::autocast::is_enabled();
+    out__[0] = at::autocast::is_enabled();
+    return 0;
   )
-  return -1;
+  return 1;
 }
 
-int at_autocast_set_enabled(int b) {
+int at_autocast_set_enabled(int *out__, int b) {
   PROTECT(
     bool is_enabled = at::autocast::is_enabled();
     at::autocast::set_enabled(b);
-    return is_enabled;
+    out__[0] = is_enabled;
+    return 0;
   )
-  return -1;
+  return 1;
 }
 
-int at_device(tensor tensor) {
+int at_device(int *out__, tensor tensor) {
   PROTECT (
     auto device = tensor->device();
-    if (device.is_cpu()) return -1;
-    return device.index();
+    if (device.is_cpu()) { out__[0] = -1; return 0; }
+    out__[0] = device.index();
+    return 0;
   )
+  return 1;
 }
 
-void at_backward(tensor t, int keep_graph, int create_graph) {
-  PROTECT(t->backward({}, keep_graph, create_graph);)
+int at_backward(tensor t, int keep_graph, int create_graph) {
+  PROTECT(
+    t->backward({}, keep_graph, create_graph);
+    return 0;
+  )
+  return 1;
 }
 
-int at_requires_grad(tensor t) {
-  PROTECT(return t->requires_grad();)
-  return -1;
+int at_requires_grad(int *out__, tensor t) {
+  PROTECT(
+    out__[0] = t->requires_grad();
+    return 0;
+  )
+  return 1;
 }
 
-int at_grad_set_enabled(int b) {
+int at_grad_set_enabled(int *out__, int b) {
   PROTECT(
     bool is_enabled = torch::autograd::GradMode::is_enabled();
     torch::autograd::GradMode::set_enabled(b);
-    return is_enabled;
+    out__[0] = is_enabled;
+    return 0;
   )
-  return -1;
+  return 1;
 }
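The query and toggle functions above now return their old result through an out-parameter, which keeps the save-and-restore idiom intact; a sketch under that assumption:

```
/* Scoped no-grad section: at_grad_set_enabled hands back the previous
 * mode through out__, so it can be restored afterwards. */
#include "torch_api.h"

void run_without_grad(tensor t) {
  int was_enabled = 0;
  if (at_grad_set_enabled(&was_enabled, 0) != 0) return;  /* grad off */
  int d = 0;
  if (at_dim(&d, t) == 0) {
    /* ... inference-only work on the d-dimensional tensor ... */
  }
  int ignored = 0;
  at_grad_set_enabled(&ignored, was_enabled);  /* restore previous mode */
}
```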
-tensor at_get(tensor t, int index) {
-  PROTECT(return new torch::Tensor((*t)[index]);)
-  return nullptr;
+int at_get(tensor *out__, tensor t, int index) {
+  PROTECT(
+    out__[0] = new torch::Tensor((*t)[index]);
+    return 0;
+  )
+  return 1;
 }
 
 template<typename T>
@@ -199,77 +266,106 @@ T at_value_at_indexes(tensor t, int *indexes, int indexes_len) {
   return T();
 }
 
-double at_double_value_at_indexes(tensor t, int *indexes, int indexes_len) {
-  return at_value_at_indexes<double>(t, indexes, indexes_len);
+int at_double_value_at_indexes(double *out__, tensor t, int *indexes, int indexes_len) {
+  PROTECT(
+    out__[0] = at_value_at_indexes<double>(t, indexes, indexes_len);
+    return 0;
+  )
+  return 1;
 }
 
-int64_t at_int64_value_at_indexes(tensor t, int *indexes, int indexes_len) {
-  return at_value_at_indexes<int64_t>(t, indexes, indexes_len);
+int at_int64_value_at_indexes(int64_t *out__, tensor t, int *indexes, int indexes_len) {
+  PROTECT(
+    out__[0] = at_value_at_indexes<int64_t>(t, indexes, indexes_len);
+    return 0;
+  )
+  return 1;
 }
 
 template<typename T>
-void at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) {
+int at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) {
   PROTECT(
     torch::Tensor tensor = *t;
     for (int i = 0; i < indexes_len; ++i) {
       tensor = tensor[indexes[i]];
     }
     tensor.fill_(v);
+    return 0;
   )
+  return 1;
 }
 
-void at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) {
-  at_set_value_at_indexes(t, indexes, indexes_len, v);
+int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) {
+  return at_set_value_at_indexes(t, indexes, indexes_len, v);
 }
 
-void at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) {
-  at_set_value_at_indexes(t, indexes, indexes_len, v);
+int at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) {
  return at_set_value_at_indexes(t, indexes, indexes_len, v);
 }
 
-void at_fill_double(tensor t, double v) {
-  PROTECT(t->fill_(v);)
+int at_fill_double(tensor t, double v) {
+  PROTECT(
+    t->fill_(v);
+    return 0;
+  )
+  return 1;
 }
 
-void at_fill_int64(tensor t, int64_t v) {
-  PROTECT(t->fill_(v);)
+int at_fill_int64(tensor t, int64_t v) {
+  PROTECT(
+    t->fill_(v);
+    return 0;
+  )
+  return 1;
 }
 
-void at_print(tensor t) {
+int at_print(tensor t) {
   PROTECT(
     torch::Tensor *tensor = (torch::Tensor*)t;
     cout << *tensor << endl;
+    return 0;
   )
+  return 1;
 }
 
-char *at_to_string(tensor t, int line_size) {
+int at_to_string(char **out__, tensor t, int line_size) {
   PROTECT(
     std::ostringstream oss;
     torch::print(oss, *t, line_size);
-    return strdup(oss.str().c_str());
+    out__[0] = strdup(oss.str().c_str());
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-void at_copy_(tensor dst, tensor src) {
+int at_copy_(tensor dst, tensor src) {
   PROTECT(
     dst->copy_(*src);
+    return 0;
   )
+  return 1;
 }
 
-void at_save(tensor t, char *filename) {
-  PROTECT(torch::save(*t, filename);)
+int at_save(tensor t, char *filename) {
  PROTECT(
+    torch::save(*t, filename);
+    return 0;
+  )
+  return 1;
 }
 
-void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
+int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
   PROTECT(
     torch::serialize::OutputArchive archive;
     for (int i = 0; i < ntensors; ++i)
       archive.write(std::string(tensor_names[i]), *(tensors[i]), /* buffer=*/ false);
     archive.save_to(filename);
+    return 0;
   )
+  return 1;
 }
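One ownership detail worth noting in the conversion above: at_to_string still strdup's its buffer, but now hands it to the caller through a char** out-parameter, so the caller keeps the obligation to free it. A sketch:

```
/* Print a tensor's textual form and release the strdup'd buffer. */
#include <stdio.h>
#include <stdlib.h>
#include "torch_api.h"

void dump(tensor t) {
  char *s = NULL;
  if (at_to_string(&s, t, /*line_size=*/80) == 0) {
    printf("%s\n", s);
    free(s);  /* allocated with strdup inside the wrapper */
  }
}
```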
-void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
+int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
   PROTECT(
     torch::serialize::InputArchive archive;
     archive.load_from(std::string(filename));
@@ -280,20 +376,24 @@ void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *fil
     // [read], no memory has to be freed.
     for (int i = 0; i < ntensors; ++i)
       tensors[i] = new torch::Tensor(ts[i]);
+    return 0;
   )
+  return 1;
 }
 
-void at_load_callback(char *filename, void (*f)(char *, tensor)) {
+int at_load_callback(char *filename, void (*f)(char *, tensor)) {
   PROTECT(
     auto module = torch::jit::load(filename);
     for (const auto &p : module.named_parameters()) {
       auto v = p.value;
       f((char*)p.name.c_str(), new torch::Tensor(v));
     }
+    return 0;
   )
+  return 1;
 }
 
-void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
+int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) {
   PROTECT(
     torch::NoGradGuard no_grad;
     torch::serialize::InputArchive archive;
@@ -307,41 +407,62 @@ void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *fi
         tensors[i]->copy_(tmp_tensor);
       }
     }
+    return 0;
   )
+  return 1;
 }
 
-tensor at_load(char *filename) {
+int at_load(tensor *out__, char *filename) {
   PROTECT(
     torch::Tensor tensor;
     torch::load(tensor, filename);
-    return new torch::Tensor(tensor);
+    out__[0] = new torch::Tensor(tensor);
+    return 0;
   )
-  return nullptr;
+  return 1;
 }
 
-int at_get_num_interop_threads() {
-  PROTECT(return at::get_num_interop_threads();)
-  return -1;
+int at_get_num_interop_threads(int *out__) {
  PROTECT(
+    out__[0] = at::get_num_interop_threads();
+    return 0;
+  )
+  return 1;
 }
 
-int at_get_num_threads() {
-  PROTECT(return at::get_num_threads();)
-  return -1;
+int at_get_num_threads(int *out__) {
+  PROTECT(
+    out__[0] = at::get_num_threads();
+    return 0;
+  )
+  return 1;
 }
 
-void at_set_num_interop_threads(int n_threads) {
-  PROTECT(at::set_num_interop_threads(n_threads);)
+int at_set_num_interop_threads(int n_threads) {
+  PROTECT(
+    at::set_num_interop_threads(n_threads);
+    return 0;
+  )
+  return 1;
 }
 
-void at_set_num_threads(int n_threads) {
-  PROTECT(at::set_num_threads(n_threads);)
+int at_set_num_threads(int n_threads) {
+  PROTECT(
+    at::set_num_threads(n_threads);
+    return 0;
+  )
+  return 1;
 }
 
-void at_free(tensor t) {
-  delete(t);
+int at_free(tensor t) {
+  PROTECT(
+    delete(t);
+    return 0;
+  )
+  return 1;
 }
 
-void at_run_backward(tensor *tensors,
+int at_run_backward(tensor *tensors,
                     int ntensors,
                     tensor *inputs,
                     int ninputs,
@@ -368,10 +489,12 @@ void at_run_backward(tensor *tensors,
     for (int i = 0; i < ninputs; ++i) {
       outputs[i] = static_cast<tensor>(new torch::autograd::Variable(vl[i]));
     }
+    return 0;
   )
+  return 1;
 }
 
-optimizer ato_adam(double learning_rate,
+int ato_adam(optimizer *out__, double learning_rate,
                   double beta1,
                   double beta2,
                   double weight_decay,
@@ -382,12 +505,13 @@ optimizer ato_adam(double learning_rate,
       .betas(std::tuple<double, double>(beta1, beta2))
      .weight_decay(weight_decay)
      .eps(eps);
-    return new torch::optim::Adam(vector<torch::Tensor>(), options);
+    out__[0] = new torch::optim::Adam(vector<torch::Tensor>(), options);
+    return 0;
  )
-  return nullptr;
+  return 1;
 }
 
-optimizer ato_rmsprop(double learning_rate,
+int ato_rmsprop(optimizer *out__, double learning_rate,
                      double alpha,
                      double eps,
                      double weight_decay,
@@ -401,12 +525,13 @@ optimizer ato_rmsprop(double learning_rate,
      .weight_decay(weight_decay)
      .momentum(momentum)
      .centered(centered != 0);
-    return new torch::optim::RMSprop(vector<torch::Tensor>(), options);
-  )
-  return nullptr;
+    out__[0] = new torch::optim::RMSprop(vector<torch::Tensor>(), options);
+    return 0;
+  )
+  return 1;
 }
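The optimizer constructors above (and ato_sgd below) follow the same pattern: the handle comes back through an out-parameter and every subsequent call reports a status code. A hedged sketch of one training step; the hyper-parameter values are illustrative only.

```
/* Create an Adam optimizer, attach parameters, and run one step. */
#include "torch_api.h"

int train_step(tensor *params, int nparams) {
  optimizer opt;
  if (ato_adam(&opt, /*learning_rate=*/1e-3, /*beta1=*/0.9,
               /*beta2=*/0.999, /*weight_decay=*/0.0, /*eps=*/1e-8) != 0)
    return 1;
  if (ato_add_parameters(opt, params, nparams) != 0) { ato_free(opt); return 1; }
  if (ato_zero_grad(opt) != 0) { ato_free(opt); return 1; }
  /* ... forward pass, then at_backward(loss, 0, 0) on the loss tensor ... */
  int rc = ato_step(opt);
  ato_free(opt);
  return rc;
}
```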
.momentum(momentum) .centered(centered != 0); - return new torch::optim::RMSprop(vector<torch::Tensor>(), options); - ) - return nullptr; + out__[0] = new torch::optim::RMSprop(vector<torch::Tensor>(), options); + return 0; + ) + return 1; } -optimizer ato_sgd(double learning_rate, +int ato_sgd(optimizer *out__, double learning_rate, double momentum, double dampening, double weight_decay, @@ -418,19 +543,22 @@ optimizer ato_sgd(double learning_rate, .dampening(dampening) .weight_decay(weight_decay) .nesterov(nesterov); - return new torch::optim::SGD(vector<torch::Tensor>(), options); + out__[0] = new torch::optim::SGD(vector<torch::Tensor>(), options); + return 0; ) - return nullptr; + return 1; } -void ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { +int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { PROTECT( for (int i = 0; i < ntensors; ++i) t->param_groups()[0].params().push_back(*(tensors[i])); + return 0; ) + return 1; } -void ato_set_learning_rate(optimizer t, double learning_rate) { +int ato_set_learning_rate(optimizer t, double learning_rate) { PROTECT( torch::optim::OptimizerOptions* d = &(t->defaults()); if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { @@ -465,10 +593,12 @@ void ato_set_learning_rate(optimizer t, double learning_rate) { } else caml_invalid_argument("unexpected optimizer"); + return 0; ) + return 1; } -void ato_set_momentum(optimizer t, double momentum) { +int ato_set_momentum(optimizer t, double momentum) { PROTECT( torch::optim::OptimizerOptions* d = &(t->defaults()); if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { @@ -502,105 +632,153 @@ void ato_set_momentum(optimizer t, double momentum) { } } else - caml_invalid_argument("unexpected optimizer"); + caml_invalid_argument("unexpected optimizer"); + return 0; ) + return 1; } - -void ato_zero_grad(optimizer t) { - PROTECT(t->zero_grad();) +int ato_zero_grad(optimizer t) { + PROTECT( + t->zero_grad(); + return 0; + ) + return 1; } -void ato_step(optimizer t) { - PROTECT(t->step();) +int ato_step(optimizer t) { + PROTECT( + t->step(); + return 0; + ) + return 1; } -void ato_free(optimizer t) { - delete(t); +int ato_free(optimizer t) { + PROTECT( + delete(t); + return 0; + ) + return 1; } -scalar ats_int(int64_t v) { - PROTECT(return new torch::Scalar(v);) - return nullptr; +int ats_int(scalar *out__, int64_t v) { + PROTECT( + out__[0] = new torch::Scalar(v); + return 0; + ) + return 1; } -scalar ats_float(double v) { - PROTECT(return new torch::Scalar(v);) - return nullptr; +int ats_float(scalar *out__, double v) { + PROTECT( + out__[0] = new torch::Scalar(v); + return 0; + ) + return 1; } -int64_t ats_to_int(scalar s) { - PROTECT(return s->toLong();) +int ats_to_int(int64_t *out__, scalar s) { + PROTECT( + out__[0] = s->toLong(); + return 0; + ) return -1; } -double ats_to_float(scalar s) { - PROTECT(return s->toDouble();) - return 0.; +int ats_to_float(double *out__, scalar s) { + PROTECT( + out__[0] = s->toDouble(); + return 0; + ) + return 1; } -char *ats_to_string(scalar s) { +int ats_to_string(char **out__, scalar s) { PROTECT( using namespace at; std::ostringstream oss; oss << (*s); - return strdup(oss.str().c_str()); + out__[0] = strdup(oss.str().c_str()); + return 0; ) - return nullptr; + return 1; } -void ats_free(scalar s) { - delete(s); +int ats_free(scalar s) { + PROTECT( + delete(s); + return 0; + ) + return 1; } -int atc_cuda_device_count() { - PROTECT(return torch::cuda::device_count();) - return -1; +int atc_cuda_device_count(int *out__) { + PROTECT( + out__[0] = torch::cuda::device_count(); + return 0; + ) + return 1; } -int atc_cuda_is_available()
{ - PROTECT(return torch::cuda::is_available();) - return -1; +int atc_cuda_is_available(int *out__) { + PROTECT( + out__[0] = torch::cuda::is_available(); + return 0; + ) + return 1; } -int atc_cudnn_is_available() { - PROTECT(return torch::cuda::cudnn_is_available();) - return -1; +int atc_cudnn_is_available(int *out__) { + PROTECT( + out__[0] = torch::cuda::cudnn_is_available(); + return 0; + ) + return 1; } -void atc_set_benchmark_cudnn(int b) { - at::globalContext().setBenchmarkCuDNN(b); +int atc_set_benchmark_cudnn(int b) { + PROTECT( + at::globalContext().setBenchmarkCuDNN(b); + return 0; + ) + return 1; } -module atm_load(char *filename) { +int atm_load(module *out__, char *filename) { PROTECT( - return new torch::jit::script::Module(torch::jit::load(filename)); + out__[0] = new torch::jit::script::Module(torch::jit::load(filename)); + return 0; ) - return nullptr; + return 1; } -module atm_load_str(char *data, size_t sz) { +int atm_load_str(module *out__, char *data, size_t sz) { PROTECT( std::istringstream stream(std::string(data, sz)); - return new torch::jit::script::Module(torch::jit::load(stream)); + out__[0] = new torch::jit::script::Module(torch::jit::load(stream)); + return 0; ) - return nullptr; + return 1; } -tensor atm_forward(module m, tensor *tensors, int ntensors) { +int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { PROTECT( std::vector<torch::jit::IValue> inputs; for (int i = 0; i < ntensors; ++i) inputs.push_back(*(tensors[i])); torch::jit::IValue output = m->forward(inputs); - if (!output.isTensor()) - caml_failwith("forward did not return a tensor"); - return new torch::Tensor(output.toTensor()); + if (!output.isTensor()) { + myerr = strdup("forward did not return a tensor"); + return 1; + } + out__[0] = new torch::Tensor(output.toTensor()); + return 0; ) - return nullptr; + return 1; } -ivalue atm_forward_(module m, +int atm_forward_(ivalue *out__, module m, ivalue *ivalues, int nivalues) { PROTECT( @@ -608,230 +786,323 @@ ivalue atm_forward_(module m, for (int i = 0; i < nivalues; ++i) inputs.push_back(*(ivalues[i])); torch::jit::IValue output = m->forward(inputs); - return new torch::jit::IValue(output); + out__[0] = new torch::jit::IValue(output); + return 0; ) - return nullptr; + return 1; } -void atm_free(module m) { - delete(m); +int atm_free(module m) { + PROTECT( + delete(m); + return 0; + ) + return 1; } -void atm_to(module m, int device, int dtype, bool non_blocking) { +int atm_to(module m, int device, int dtype, bool non_blocking) { PROTECT( m->to(device_of_int(device), at::ScalarType(dtype), non_blocking); + return 0; ) + return 1; } -ivalue ati_tensor(tensor t) { +int ati_tensor(ivalue *out__, tensor t) { PROTECT( - return new torch::jit::IValue(*t); + out__[0] = new torch::jit::IValue(*t); + return 0; ) - return nullptr; + return 1; } -ivalue ati_int(int64_t i) { +int ati_int(ivalue *out__, int64_t i) { PROTECT( - return new torch::jit::IValue(i); + out__[0] = new torch::jit::IValue(i); + return 0; ) - return nullptr; + return 1; } -ivalue ati_double(double d) { +int ati_double(ivalue *out__, double d) { PROTECT( - return new torch::jit::IValue(d); + out__[0] = new torch::jit::IValue(d); + return 0; ) - return nullptr; + return 1; } -ivalue ati_bool(int i) { +int ati_bool(ivalue *out__, int i) { PROTECT( - return new torch::jit::IValue((bool)i); + out__[0] = new torch::jit::IValue((bool)i); + return 0; ) - return nullptr; + return 1; } -ivalue ati_string(char *s) { +int ati_string(ivalue *out__, char *s) { PROTECT( string str(s); - return new
torch::jit::IValue(str); + out__[0] = new torch::jit::IValue(str); + return 0; ) - return nullptr; + return 1; } -ivalue ati_none() { +int ati_none(ivalue *out__) { PROTECT( - return new torch::jit::IValue(); + out__[0] = new torch::jit::IValue(); + return 0; ) - return nullptr; + return 1; } -ivalue ati_tuple(ivalue *is, int nvalues) { +int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { PROTECT( vector<torch::jit::IValue> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); - return new torch::jit::IValue(torch::ivalue::Tuple::create(vec)); + out__[0] = new torch::jit::IValue(torch::ivalue::Tuple::create(vec)); + return 0; ) - return nullptr; + return 1; } -ivalue ati_generic_list(ivalue *is, int nvalues) { +int ati_generic_list(ivalue *out__, ivalue *is, int nvalues) { PROTECT( c10::List<torch::jit::IValue> vec(c10::AnyType::get()); for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); - return new torch::jit::IValue(c10::List<torch::jit::IValue>(vec)); + out__[0] = new torch::jit::IValue(c10::List<torch::jit::IValue>(vec)); + return 0; ) - return nullptr; + return 1; } -ivalue ati_generic_dict(ivalue *is, int nvalues) { +int ati_generic_dict(ivalue *out__, ivalue *is, int nvalues) { c10::Dict<torch::jit::IValue, torch::jit::IValue> dict(c10::AnyType::get(), c10::AnyType::get()); PROTECT( for (int i = 0; i < nvalues; ++i) dict.insert(*(is[2*i]), *(is[2*i+1])); - return new torch::jit::IValue(dict); + out__[0] = new torch::jit::IValue(dict); + return 0; ) - return nullptr; + return 1; } -ivalue ati_int_list(int64_t *is, int nvalues) { +int ati_int_list(ivalue *out__, int64_t *is, int nvalues) { PROTECT( c10::List<int64_t> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(is[i]); - return new torch::jit::IValue(vec); + out__[0] = new torch::jit::IValue(vec); + return 0; ) - return nullptr; + return 1; } -ivalue ati_double_list(double *is, int nvalues) { +int ati_double_list(ivalue *out__, double *is, int nvalues) { PROTECT( c10::List<double> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(is[i]); - return new torch::jit::IValue(vec); + out__[0] = new torch::jit::IValue(vec); + return 0; ) - return nullptr; + return 1; } -ivalue ati_bool_list(char *is, int nvalues) { +int ati_bool_list(ivalue *out__, char *is, int nvalues) { PROTECT( c10::List<bool> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(is[i] != 0); - return new torch::jit::IValue(vec); + out__[0] = new torch::jit::IValue(vec); + return 0; ) - return nullptr; + return 1; } -ivalue ati_string_list(char **is, int nvalues) { +int ati_string_list(ivalue *out__, char **is, int nvalues) { PROTECT( c10::List<std::string> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(string(is[i])); - return new torch::jit::IValue(vec); + out__[0] = new torch::jit::IValue(vec); + return 0; ) - return nullptr; + return 1; } -ivalue ati_tensor_list(tensor *is, int nvalues) { +int ati_tensor_list(ivalue *out__, tensor *is, int nvalues) { PROTECT( c10::List<at::Tensor> vec; for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); - return new torch::jit::IValue(vec); + out__[0] = new torch::jit::IValue(vec); + return 0; ) - return nullptr; + return 1; } -int ati_tag(ivalue i) { +int ati_tag(int *out__, ivalue i) { PROTECT( - if (i->isNone()) return 0; - else if (i->isTensor()) return 1; - else if (i->isDouble()) return 2; - else if (i->isInt()) return 3; - else if (i->isBool()) return 4; - else if (i->isTuple()) return 5; - else if (i->isIntList()) return 6; - else if (i->isDoubleList()) return 7; - else if (i->isBoolList()) return 8; - else if (i->isString()) return 9; - else if (i->isTensorList()) return 10; - else if (i->isList()) return 12; - else if (i->isGenericDict()) return
13; - caml_failwith(("unsupported tag" + i->tagKind()).c_str()); - return -1; + if (i->isNone()) { + out__[0] = 0; + return 0; + } + else if (i->isTensor()) { + out__[0] = 1; + return 0; + } + else if (i->isDouble()) { + out__[0] = 2; + return 0; + } + else if (i->isInt()) { + out__[0] = 3; + return 0; + } + else if (i->isBool()) { + out__[0] = 4; + return 0; + } + else if (i->isTuple()) { + out__[0] = 5; + return 0; + } + else if (i->isIntList()) { + out__[0] = 6; + return 0; + } + else if (i->isDoubleList()) { + out__[0] = 7; + return 0; + } + else if (i->isBoolList()) { + out__[0] = 8; + return 0; + } + else if (i->isString()) { + out__[0] = 9; + return 0; + } + else if (i->isTensorList()) { + out__[0] = 10; + return 0; + } + else if (i->isList()) { + out__[0] = 12; + return 0; + } + else if (i->isGenericDict()) { + out__[0] = 13; + return 0; + } + myerr = strdup(("unsupported tag" + i->tagKind()).c_str()); + return 1; ) - return -1; + return 1; } -int64_t ati_to_int(ivalue i) { +int ati_to_int(int64_t *out__, ivalue i) { PROTECT( - return i->toInt(); + out__[0] = i->toInt(); + return 0; ) - return -1; + return 1; } -double ati_to_double(ivalue i) { +int ati_to_double(double *out__, ivalue i) { PROTECT( - return i->toDouble(); + out__[0] = i->toDouble(); + return 0; ) - return 0.; + return 1; } -int ati_to_bool(ivalue i) { +int ati_to_bool(int *out__, ivalue i) { PROTECT( - return i->toBool(); + out__[0] = i->toBool(); + return 0; ) - return -1; + return 1; } -char *ati_to_string(ivalue i) { +int ati_to_string(char **out__, ivalue i) { PROTECT( auto str = i->toStringRef(); - return strdup(str.c_str()); + out__[0] = strdup(str.c_str()); + return 0; ) - return nullptr; + return 1; } -tensor ati_to_tensor(ivalue i) { +int ati_to_tensor(tensor *out__, ivalue i) { PROTECT( - return new torch::Tensor(i->toTensor()); + out__[0] = new torch::Tensor(i->toTensor()); + return 0; ) - return nullptr; + return 1; } -int ati_length(ivalue i) { +int ati_length(int *out__, ivalue i) { PROTECT( - if (i->isTuple()) return i->toTuple()->elements().size(); - else if (i->isIntList()) return i->toIntList().size(); - else if (i->isDoubleList()) return i->toDoubleList().size(); - else if (i->isBoolList()) return i->toBoolList().size(); - else if (i->isString()) return i->toStringRef().size(); - else if (i->isTensorList()) return i->toTensorList().size(); - else if (i->isList()) return i->toList().size(); - else if (i->isGenericDict()) return i->toGenericDict().size(); + if (i->isTuple()) { + out__[0] = i->toTuple()->elements().size(); + return 0; + } + else if (i->isIntList()) { + out__[0] = i->toIntList().size(); + return 0; + } + else if (i->isDoubleList()) { + out__[0] = i->toDoubleList().size(); + return 0; + } + else if (i->isBoolList()) { + out__[0] = i->toBoolList().size(); + return 0; + } + else if (i->isString()) { + out__[0] = i->toStringRef().size(); + return 0; + } + else if (i->isTensorList()) { + out__[0] = i->toTensorList().size(); + return 0; + } + else if (i->isList()) { + out__[0] = i->toList().size(); + return 0; + } + else if (i->isGenericDict()) { + out__[0] = i->toGenericDict().size(); + return 0; + } caml_invalid_argument("unsupported tag for this length"); - return -1; + return 1; ) - return -1; + return 1; } -int ati_tuple_length(ivalue i) { +int ati_tuple_length(int *out__, ivalue i) { PROTECT( - return i->toTuple()->elements().size(); + out__[0] = i->toTuple()->elements().size(); + return 0; ) - return -1; + return 1; } -void ati_to_tuple(ivalue i, +int ati_to_tuple(ivalue i, ivalue 
*outputs, int noutputs) { PROTECT( auto vec = i->toTuple()->elements(); if (vec.size() != noutputs) { - caml_failwith("unexpected tuple size"); + myerr = strdup("unexpected tuple size"); + return 1; } for (int i = 0; i < noutputs; ++i) outputs[i] = new torch::jit::IValue(vec[i]); + return 0; ) + return 1; } -void ati_to_generic_list(ivalue i, +int ati_to_generic_list(ivalue i, ivalue *outputs, int noutputs) { PROTECT( @@ -841,10 +1112,12 @@ void ati_to_generic_list(ivalue i, } for (int i = 0; i < noutputs; ++i) outputs[i] = new torch::jit::IValue(vec[i]); + return 0; ) + return 1; } -void ati_to_generic_dict(ivalue i, +int ati_to_generic_dict(ivalue i, ivalue *outputs, int noutputs) { PROTECT( @@ -857,10 +1130,12 @@ void ati_to_generic_dict(ivalue i, outputs[k++] = new torch::jit::IValue(it->key()); outputs[k++] = new torch::jit::IValue(it->value()); } + return 0; ) + return 1; } -void ati_to_int_list(ivalue i, +int ati_to_int_list(ivalue i, int64_t *outputs, int noutputs) { PROTECT( @@ -870,10 +1145,12 @@ void ati_to_int_list(ivalue i, } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; + return 0; ) + return 1; } -void ati_to_double_list(ivalue i, +int ati_to_double_list(ivalue i, double *outputs, int noutputs) { PROTECT( @@ -883,10 +1160,12 @@ void ati_to_double_list(ivalue i, } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; + return 0; ) + return 1; } -void ati_to_bool_list(ivalue i, +int ati_to_bool_list(ivalue i, char *outputs, int noutputs) { PROTECT( @@ -896,10 +1175,12 @@ void ati_to_bool_list(ivalue i, } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; + return 0; ) + return 1; } -void ati_to_tensor_list(ivalue i, +int ati_to_tensor_list(ivalue i, tensor *outputs, int noutputs) { PROTECT( @@ -909,11 +1190,17 @@ void ati_to_tensor_list(ivalue i, } for (int i = 0; i < noutputs; ++i) outputs[i] = new torch::Tensor(vec[i]); + return 0; ) + return 1; } -void ati_free(ivalue i) { - delete(i); +int ati_free(ivalue i) { + PROTECT( + delete(i); + return 0; + ) + return 1; } #include "torch_api_generated.cpp.h" diff --git a/deps/c_wrapper/torch_api.h b/deps/c_wrapper/torch_api.h index 0b60ac09..031064d0 100644 --- a/deps/c_wrapper/torch_api.h +++ b/deps/c_wrapper/torch_api.h @@ -9,11 +9,14 @@ typedef torch::Scalar *scalar; typedef torch::optim::Optimizer *optimizer; typedef torch::jit::script::Module *module; typedef torch::jit::IValue *ivalue; +char const *myerr = ""; #define PROTECT(x) \ try { \ x \ } catch (const exception& e) { \ - caml_failwith(strdup(e.what())); \ + myerr = strdup(e.what()); \ + /* jl_error(strdup(e.what())); */ \ + /* throw(e.what()); */ \ } #else typedef void *tensor; @@ -23,61 +26,64 @@ typedef void *module; typedef void *ivalue; #endif -void at_manual_seed(int64_t); -tensor at_new_tensor(); -tensor at_tensor_of_data(void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); -void at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); -tensor at_float_vec(double *values, int value_len, int type); -tensor at_int_vec(int64_t *values, int value_len, int type); - -int at_defined(tensor); -int at_is_sparse(tensor); -int at_device(tensor); -int at_dim(tensor); -void at_shape(tensor, int *); -void at_stride(tensor, int *); -int at_scalar_type(tensor); - -void at_autocast_clear_cache(); -int at_autocast_decrement_nesting(); -int at_autocast_increment_nesting(); -int at_autocast_is_enabled(); -int at_autocast_set_enabled(int b); - -void at_backward(tensor, int, int); -int at_requires_grad(tensor); -int 
at_grad_set_enabled(int); - -tensor at_get(tensor, int index); -void at_fill_double(tensor, double); -void at_fill_int64(tensor, int64_t); - -double at_double_value_at_indexes(tensor, int *indexes, int indexes_len); -int64_t at_int64_value_at_indexes(tensor, int *indexes, int indexes_len); -void at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v); -void at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v); - -void at_copy_(tensor dst, tensor src); - -void at_print(tensor); -char *at_to_string(tensor, int line_size); -void at_save(tensor, char *filename); -tensor at_load(char *filename); - -int at_get_num_threads(); -void at_set_num_threads(int n_threads); - -void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +int get_last_error(char *); +int flush_error(); + +int at_manual_seed(int64_t); +int at_new_tensor(tensor *); +int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); +int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); +int at_float_vec(tensor *, double *values, int value_len, int type); +int at_int_vec(tensor *, int64_t *values, int value_len, int type); + +int at_defined(int *, tensor); +int at_is_sparse(int *, tensor); +int at_device(int *, tensor); +int at_dim(int *, tensor); +int at_shape(tensor, int *); +int at_stride(tensor, int *); +int at_scalar_type(int *, tensor); + +int at_autocast_clear_cache(); +int at_autocast_decrement_nesting(int *); +int at_autocast_increment_nesting(int *); +int at_autocast_is_enabled(int *); +int at_autocast_set_enabled(int *, int b); + +int at_backward(tensor, int, int); +int at_requires_grad(int *, tensor); +int at_grad_set_enabled(int *, int); + +int at_get(tensor *, tensor, int index); +int at_fill_double(tensor, double); +int at_fill_int64(tensor, int64_t); + +int at_double_value_at_indexes(double *, tensor, int *indexes, int indexes_len); +int at_int64_value_at_indexes(int64_t *, tensor, int *indexes, int indexes_len); +int at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v); +int at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v); + +int at_copy_(tensor dst, tensor src); + +int at_print(tensor); +int at_to_string(char **, tensor, int line_size); +int at_save(tensor, char *filename); +int at_load(tensor *, char *filename); + +int at_get_num_threads(int *); +int at_set_num_threads(int n_threads); + +int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); /* [at_load_multi] takes as input an array of nullptr for [tensors]. */ -void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); /* [at_load_multi_] takes as input an array of allocation [tensors]. 
*/ -void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename); +int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename); -void at_load_callback(char *filename, void (*f)(char *, tensor)); +int at_load_callback(char *filename, void (*f)(char *, tensor)); -void at_free(tensor); +int at_free(tensor); -void at_run_backward(tensor *tensors, +int at_run_backward(tensor *tensors, int ntensors, tensor *inputs, int ninputs, @@ -85,79 +91,79 @@ void at_run_backward(tensor *tensors, int keep_graph, int create_graph); -optimizer ato_adam(double learning_rate, +int ato_adam(optimizer *, double learning_rate, double beta1, double beta2, double weight_decay, double eps); -optimizer ato_rmsprop(double learning_rate, +int ato_rmsprop(optimizer *, double learning_rate, double alpha, double eps, double weight_decay, double momentum, int centered); -optimizer ato_sgd(double learning_rate, +int ato_sgd(optimizer *, double learning_rate, double momentum, double dampening, double weight_decay, int nesterov); -void ato_add_parameters(optimizer, tensor *, int ntensors); -void ato_set_learning_rate(optimizer, double learning_rate); -void ato_set_momentum(optimizer, double momentum); -void ato_zero_grad(optimizer); -void ato_step(optimizer); -void ato_free(optimizer); - -scalar ats_int(int64_t); -scalar ats_float(double); -void ats_free(scalar); - -int atc_cuda_device_count(); -int atc_cuda_is_available(); -int atc_cudnn_is_available(); -void atc_set_benchmark_cudnn(int b); - -module atm_load(char *); -tensor atm_forward(module, tensor *tensors, int ntensors); -ivalue atm_forward_(module, +int ato_add_parameters(optimizer, tensor *, int ntensors); +int ato_set_learning_rate(optimizer, double learning_rate); +int ato_set_momentum(optimizer, double momentum); +int ato_zero_grad(optimizer); +int ato_step(optimizer); +int ato_free(optimizer); + +int ats_int(scalar *, int64_t); +int ats_float(scalar *, double); +int ats_free(scalar); + +int atc_cuda_device_count(int *); +int atc_cuda_is_available(int *); +int atc_cudnn_is_available(int *); +int atc_set_benchmark_cudnn(int b); + +int atm_load(module *, char *); +int atm_forward(tensor *, module, tensor *tensors, int ntensors); +int atm_forward_(ivalue *, module, ivalue *ivalues, int nivalues); -void atm_free(module); - -ivalue ati_none(); -ivalue ati_tensor(tensor); -ivalue ati_bool(int); -ivalue ati_int(int64_t); -ivalue ati_double(double); -ivalue ati_tuple(ivalue *, int); -ivalue ati_string(char *); -ivalue ati_tuple(ivalue *, int); -ivalue ati_generic_list(ivalue *, int); -ivalue ati_generic_dict(ivalue *, int); -ivalue ati_int_list(int64_t *, int); -ivalue ati_double_list(double *, int); -ivalue ati_bool_list(char *, int); -ivalue ati_string_list(char **, int); -ivalue ati_tensor_list(tensor *, int); - -tensor ati_to_tensor(ivalue); -int64_t ati_to_int(ivalue); -double ati_to_double(ivalue); -char *ati_to_string(ivalue); -int ati_to_bool(ivalue); -int ati_length(ivalue); -int ati_tuple_length(ivalue); -void ati_to_tuple(ivalue, ivalue *, int); -void ati_to_generic_list(ivalue, ivalue *, int); -void ati_to_generic_dict(ivalue, ivalue *, int); -void ati_to_int_list(ivalue, int64_t *, int); -void ati_to_double_list(ivalue, double *, int); -void ati_to_bool_list(ivalue, char *, int); -void ati_to_tensor_list(ivalue, tensor *, int); - -int ati_tag(ivalue); - -void ati_free(ivalue); +int atm_free(module); + +int ati_none(ivalue *); +int ati_tensor(ivalue *, tensor); +int ati_bool(ivalue *, int); +int 
ati_int(ivalue *, int64_t); +int ati_double(ivalue *, double); +int ati_tuple(ivalue *, ivalue *, int); +int ati_string(ivalue *, char *); +int ati_tuple(ivalue *, ivalue *, int); +int ati_generic_list(ivalue *, ivalue *, int); +int ati_generic_dict(ivalue *, ivalue *, int); +int ati_int_list(ivalue *, int64_t *, int); +int ati_double_list(ivalue *, double *, int); +int ati_bool_list(ivalue *, char *, int); +int ati_string_list(ivalue *, char **, int); +int ati_tensor_list(ivalue *, tensor *, int); + +int ati_to_tensor(tensor *, ivalue); +int ati_to_int(int64_t *, ivalue); +int ati_to_double(double *, ivalue); +int ati_to_string(char **, ivalue); +int ati_to_bool(int *, ivalue); +int ati_length(int *, ivalue); +int ati_tuple_length(int *, ivalue); +int ati_to_tuple(ivalue, ivalue *, int); +int ati_to_generic_list(ivalue, ivalue *, int); +int ati_to_generic_dict(ivalue, ivalue *, int); +int ati_to_int_list(ivalue, int64_t *, int); +int ati_to_double_list(ivalue, double *, int); +int ati_to_bool_list(ivalue, char *, int); +int ati_to_tensor_list(ivalue, tensor *, int); + +int ati_tag(int *, ivalue); + +int ati_free(ivalue); #include "torch_api_generated.h" From ece9654619b1baf71235efc07fb8cb1bc49e559c Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Mon, 13 Nov 2023 09:13:34 +0100 Subject: [PATCH 09/12] Restored Julia-specific additions Also, made CUDA build optional. int at_empty_cache(); int at_no_grad(int flag); int at_sync(); int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev); --- deps/c_wrapper/CMakeLists.txt | 8 ++++++ deps/c_wrapper/torch_api.cpp | 50 +++++++++++++++++++++++++++++++++++ deps/c_wrapper/torch_api.h | 4 +++ 3 files changed, 62 insertions(+) diff --git a/deps/c_wrapper/CMakeLists.txt b/deps/c_wrapper/CMakeLists.txt index b32dedec..296c038b 100644 --- a/deps/c_wrapper/CMakeLists.txt +++ b/deps/c_wrapper/CMakeLists.txt @@ -1,6 +1,8 @@ cmake_minimum_required(VERSION 3.0 FATAL_ERROR) project(torch_c_api) +option(USE_CUDA "Use CUDA" ON) + set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR};${CMAKE_MODULE_PATH}") find_package(Torch REQUIRED) @@ -10,4 +12,10 @@ include_directories(SYSTEM path) add_library(torch_c_api "SHARED" torch_api.cpp) target_link_libraries(torch_c_api "${TORCH_LIBRARIES}") +if(USE_CUDA) + enable_language(CUDA) + target_compile_definitions(torch_c_api PRIVATE USE_CUDA) + target_include_directories(torch_c_api PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) +endif() + set_property(TARGET torch_c_api PROPERTY CXX_STANDARD 14) diff --git a/deps/c_wrapper/torch_api.cpp b/deps/c_wrapper/torch_api.cpp index f1b82f2d..ba84f5e2 100644 --- a/deps/c_wrapper/torch_api.cpp +++ b/deps/c_wrapper/torch_api.cpp @@ -2,6 +2,10 @@ #include #include #include +#ifdef USE_CUDA +#include +#include +#endif #include #include "torch_api.h" @@ -51,6 +55,16 @@ at::Device device_of_int(int d) { return at::Device(at::kCUDA, /*index=*/d); } +int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev) { + PROTECT( + auto options = torch::TensorOptions().device(torch::kCUDA, dev).requires_grad(false); + torch::Tensor tens = torch::from_blob(data, torch::IntArrayRef(dims, ndims), torch::IntArrayRef(strides, nstrides), options); + out__[0] = new torch::Tensor(tens); + return 0; + ) + return 1; +} + int at_new_tensor(tensor *out__) { PROTECT( out__[0] = new torch::Tensor(); @@ -59,6 +73,42 @@ int at_new_tensor(tensor *out__) { return 1; } +int at_empty_cache() { + PROTECT( 
+#if defined(USE_CUDA) + c10::cuda::CUDACachingAllocator::emptyCache(); + return 0; +#else + myerr = strdup("CUDA is disabled."); + return 1; +#endif + ) + return 1; +} + +int at_no_grad(int flag) { + PROTECT( + torch::GradMode::set_enabled((bool)flag); + return 0; + ) + return 1; +} + +int at_sync() { + PROTECT( +#ifdef USE_CUDA + at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); + C10_CUDA_CHECK(cudaStreamSynchronize(stream)); + return 0; +#else + myerr = strdup("CUDA is disabled."); + return 1; +#endif + ) + // torch::cuda::synchronize(); + return 1; +} + int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { PROTECT( torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type)); diff --git a/deps/c_wrapper/torch_api.h b/deps/c_wrapper/torch_api.h index 031064d0..fdb57886 100644 --- a/deps/c_wrapper/torch_api.h +++ b/deps/c_wrapper/torch_api.h @@ -31,6 +31,10 @@ int flush_error(); int at_manual_seed(int64_t); int at_new_tensor(tensor *); +int at_empty_cache(); +int at_no_grad(int flag); +int at_sync(); +int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev); int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); int at_float_vec(tensor *, double *values, int value_len, int type); From c8f1e9c71d7a41a3193c5d9fec3ef88e4737b593 Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Fri, 11 Aug 2023 16:47:49 +0200 Subject: [PATCH 10/12] Dev. container: Added support for CUDA and CUDNN Also: * Dev. container: Updated for Torch 1.10.2 * Added /build to .gitignore --- .devcontainer/Dockerfile | 66 ++++++++++++++++++++++++++++----- .devcontainer/devcontainer.json | 14 ++++--- .devcontainer/postCreate.sh | 27 ++++++++++++ .gitignore | 1 + deps/README.md | 10 ++++- 5 files changed, 101 insertions(+), 17 deletions(-) create mode 100755 .devcontainer/postCreate.sh diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index d841c4e1..36b133fe 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,18 +1,64 @@ -ARG BASE_IMAGE_TAG +ARG BASE_IMAGE_VARIANT=debian +ARG BASE_IMAGE_VERSION=11 + +ARG BASE_IMAGE_TAG=$BASE_IMAGE_VARIANT-$BASE_IMAGE_VERSION FROM mcr.microsoft.com/devcontainers/cpp:$BASE_IMAGE_TAG -ARG OCAML_VERSION -ARG OPAM_VERSION -ARG TORCH_VERSION +ARG CUDA_VERSION=11.8.0 +ARG CUDNN_VERSION=8.9.4 + +ARG OCAML_VERSION=4 +ARG OPAM_VERSION=2 -RUN sudo apt-get update \ - && sudo apt-get satisfy -y "ocaml (>= $OCAML_VERSION)" "opam (>= $OPAM_VERSION)" \ +ARG TORCH_VARIANT +ARG TORCH_VERSION=2.1.1 + +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + && apt-get satisfy -y "ocaml (>= $OCAML_VERSION)" "opam (>= $OPAM_VERSION)" \ && rm -rf /var/lib/apt/lists/* -RUN cd /usr/local \ - && sudo wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2Bcpu.zip \ - && sudo unzip libtorch-*.zip \ - && sudo rm libtorch-*.zip +ENV JULIA_DEPOT_PATH=/opt/julia_depot +ENV JULIAUP_DEPOT_PATH=/opt/juliaup +RUN curl -fsSL https://install.julialang.org | sh -s -- --default-channel 1.9 --path /opt/juliaup --yes +ENV PATH=/opt/juliaup/bin:$PATH + +ENV CUDA_VERSION=$CUDA_VERSION +ENV CUDNN_VERSION=$CUDNN_VERSION + +RUN <<EOF +CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2) +TMP_PROJECT=$(mktemp -d) +cd $TMP_PROJECT +touch Project.toml +cat <<EOT > LocalPreferences.toml +[CUDA_Runtime_jll] +version =
"$CUDA_VERSION_MAJOR_MINOR" +EOT +CUDA_ROOT=$(julia --project --eval ' + using Pkg + CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) + CUDA_SDK_jll_pkg = :CUDA_SDK_jll + if CUDA_VERSION < v"11.4" + CUDA_SDK_jll_pkg = :CUDA_full_jll + end + Pkg.add(name=string(CUDA_SDK_jll_pkg), version=ENV["CUDA_VERSION"]) + @eval using $CUDA_SDK_jll_pkg + println(@eval $CUDA_SDK_jll_pkg.artifact_dir) +') +ln -s $CUDA_ROOT/cuda /usr/local/cuda +EOF +ENV PATH=$PATH:/usr/local/cuda/bin + +SHELL ["/bin/bash", "-o", "pipefail", "-c"] + +RUN if [ -z "$TORCH_VARIANT" ]; then export TORCH_VARIANT="cu$(echo $CUDA_VERSION | cut -d . -f 1-2 | tr -d '.')"; fi \ + && cd /usr/local \ + && wget -q "https://download.pytorch.org/libtorch/$TORCH_VARIANT/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2B$TORCH_VARIANT.zip" \ + && unzip -q libtorch-*.zip \ + && rm libtorch-*.zip ENV CMAKE_PREFIX_PATH=/usr/local/libtorch diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index e316adb1..1fdda147 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,21 +2,25 @@ "build": { "dockerfile": "Dockerfile", "args": { - "BASE_IMAGE_TAG": "debian-11", + "BASE_IMAGE_VARIANT": "debian", + "BASE_IMAGE_VERSION": "11", + "CUDA_VERSION": "11.3.1", + "CUDNN_VERSION": "8.2.4", "OCAML_VERSION": "4", "OPAM_VERSION": "2", - "TORCH_VERSION": "1.4.0" + "TORCH_VERSION": "1.10.2" } }, "customizations": { "vscode": { "extensions": [ + "julialang.language-julia", "ms-vscode.cpptools-extension-pack" ] } }, - "features": { - "ghcr.io/julialang/devcontainer-features/julia:1": {} + "hostRequirements": { + "gpu": "optional" }, - "postCreateCommand": "opam init --auto-setup" + "postCreateCommand": ".devcontainer/postCreate.sh" } diff --git a/.devcontainer/postCreate.sh b/.devcontainer/postCreate.sh new file mode 100755 index 00000000..777e36c2 --- /dev/null +++ b/.devcontainer/postCreate.sh @@ -0,0 +1,27 @@ +sudo chown -R vscode:vscode /opt/juliaup /opt/julia_depot + +CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . 
-f 1-2) +TMP_PROJECT=$(mktemp -d) +cd $TMP_PROJECT +touch Project.toml +cat <<EOT > LocalPreferences.toml +[CUDA_Runtime_jll] +version = "$CUDA_VERSION_MAJOR_MINOR" +EOT +CUDNN_ROOT=$(julia --project --eval ' + using Pkg; + CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) + if CUDA_VERSION < v"11" + Pkg.add(name="CUDA_Runtime_jll", version="0.2") + elseif CUDA_VERSION < v"11.4" + Pkg.add(name="CUDA_Runtime_jll", version="0.7") + else + Pkg.add(name="CUDA_Runtime_jll") + end + Pkg.add(name="CUDNN_jll", version=ENV["CUDNN_VERSION"]); + using CUDNN_jll; + println(CUDNN_jll.artifact_dir)') \ +&& for F in $CUDNN_ROOT/include/cudnn*.h; do ln -s $F /usr/local/cuda/include/$(basename $F); done \ +&& for F in $CUDNN_ROOT/lib/libcudnn*; do ln -s $F /usr/local/cuda/lib64/$(basename $F); done + +opam init --disable-sandboxing --auto-setup diff --git a/.gitignore b/.gitignore index ccad2ae4..7afc7c25 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ /Manifest.toml +/build /.vscode diff --git a/deps/README.md b/deps/README.md index 45b93df8..edc128bb 100644 --- a/deps/README.md +++ b/deps/README.md @@ -6,8 +6,14 @@ Since Torch is a C++-library, a C wrapper is needed for Julia to interact with T The C wrapper can be generated from the `Declarations.yaml`-file included with `Torch_jll`: ```sh -mkdir c_wrapper_generator/data -curl https://raw.githubusercontent.com/LaurentMazare/ocaml-torch/main/third_party/pytorch/Declarations-v1.4.0.yaml -o c_wrapper_generator/data/Declarations.yaml +mkdir -p c_wrapper_generator/data +cp -v `julia --eval ' + using Pkg + Pkg.activate(; temp=true) + Pkg.add(name="Torch_jll", version="1.10") + import Torch_jll + print(joinpath(dirname(Torch_jll.libtorch_path), "..", "share", "ATen", "Declarations.yaml")) +'` c_wrapper_generator/data/ ``` The C wrapper can then be generated by building and running the (OCaml-based) C wrapper generator, e.g. by using the dev. container (which includes OCaml and OPAM): From f79913385b40ed1ebcd579c7deba890e6dfe318c Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Sat, 27 Apr 2024 11:50:31 +0200 Subject: [PATCH 11/12] Added GitHub Actions workflow for building C Wrapper Factored out scripts - for re-use in CI and dev. container.
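For reference, a minimal sketch of running the scripts locally, outside the container and CI (the version numbers below are only the dev. container defaults; the Julia snippets embedded in the scripts read CUDA_VERSION/CUDNN_VERSION from the environment, hence the exports):

    export CUDA_VERSION=11.3.1
    export CUDNN_VERSION=8.2.4
    ./.dev/install_build_deps.sh 10
    ./.dev/install_cuda_sdk.sh $CUDA_VERSION
    ./.dev/install_cudnn.sh $CUDA_VERSION $CUDNN_VERSION
    ./.dev/install_torch.sh cu113 1.10.2

The Torch variant cu113 is derived from the CUDA version as cu$(echo $CUDA_VERSION | cut -d . -f 1-2 | tr -d '.').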
--- .dev/install_build_deps.sh | 16 +++++ .dev/install_cuda_sdk.sh | 27 ++++++++ .dev/install_cudnn.sh | 30 +++++++++ .dev/install_torch.sh | 11 ++++ .devcontainer/Dockerfile | 39 +++-------- .devcontainer/devcontainer.json | 2 + .devcontainer/postCreate.sh | 24 +------ .github/workflows/BuildCWrapper.yaml | 97 ++++++++++++++++++++++++++++ 8 files changed, 192 insertions(+), 54 deletions(-) create mode 100755 .dev/install_build_deps.sh create mode 100755 .dev/install_cuda_sdk.sh create mode 100755 .dev/install_cudnn.sh create mode 100755 .dev/install_torch.sh create mode 100644 .github/workflows/BuildCWrapper.yaml diff --git a/.dev/install_build_deps.sh b/.dev/install_build_deps.sh new file mode 100755 index 00000000..316b840a --- /dev/null +++ b/.dev/install_build_deps.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +GCC_VERSION=$1 + +apt-get update +DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + gcc-$GCC_VERSION \ + g++-$GCC_VERSION \ + jq \ + unzip \ + wget diff --git a/.dev/install_cuda_sdk.sh b/.dev/install_cuda_sdk.sh new file mode 100755 index 00000000..a042bd2a --- /dev/null +++ b/.dev/install_cuda_sdk.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + +CUDA_VERSION=$1 + +CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2) +TMP_PROJECT=$(mktemp -d) +cd $TMP_PROJECT +touch Project.toml +cat <<EOT > LocalPreferences.toml +[CUDA_Runtime_jll] +version = "$CUDA_VERSION_MAJOR_MINOR" +EOT +CUDA_ROOT=$(julia --project --eval ' + using Pkg + CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) + CUDA_SDK_jll_pkg = :CUDA_SDK_jll + if CUDA_VERSION < v"11.4" + CUDA_SDK_jll_pkg = :CUDA_full_jll + end + Pkg.add(name=string(CUDA_SDK_jll_pkg), version=ENV["CUDA_VERSION"]) + @eval using $CUDA_SDK_jll_pkg + println(@eval $CUDA_SDK_jll_pkg.artifact_dir) +') +ln -s $CUDA_ROOT/cuda /usr/local/cuda +export PATH=$PATH:/usr/local/cuda/bin diff --git a/.dev/install_cudnn.sh b/.dev/install_cudnn.sh new file mode 100755 index 00000000..117135e5 --- /dev/null +++ b/.dev/install_cudnn.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -e + +CUDA_VERSION=$1 +CUDNN_VERSION=$2 + +CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d .
-f 1-2) +TMP_PROJECT=$(mktemp -d) +cd $TMP_PROJECT +touch Project.toml +cat <<EOT > LocalPreferences.toml +[CUDA_Runtime_jll] +version = "$CUDA_VERSION_MAJOR_MINOR" +EOT +CUDNN_ROOT=$(julia --project --eval ' + using Pkg; + CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) + if CUDA_VERSION < v"11" + Pkg.add(name="CUDA_Runtime_jll", version="0.2") + elseif CUDA_VERSION < v"11.4" + Pkg.add(name="CUDA_Runtime_jll", version="0.7") + else + Pkg.add(name="CUDA_Runtime_jll") + end + Pkg.add(name="CUDNN_jll", version=ENV["CUDNN_VERSION"]); + using CUDNN_jll; + println(CUDNN_jll.artifact_dir)') +for F in $CUDNN_ROOT/include/cudnn*.h; do ln -sf $F /usr/local/cuda/include/$(basename $F); done +for F in $CUDNN_ROOT/lib/libcudnn*; do ln -sf $F /usr/local/cuda/lib64/$(basename $F); done diff --git a/.dev/install_torch.sh b/.dev/install_torch.sh new file mode 100755 index 00000000..5c61fae5 --- /dev/null +++ b/.dev/install_torch.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -e + +TORCH_VARIANT=$1 +TORCH_VERSION=$2 + +cd /usr/local +wget -q "https://download.pytorch.org/libtorch/$TORCH_VARIANT/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2B$TORCH_VARIANT.zip" +unzip -q libtorch-*.zip +rm libtorch-*.zip diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 36b133fe..a6423038 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -8,16 +8,17 @@ FROM mcr.microsoft.com/devcontainers/cpp:$BASE_IMAGE_TAG ARG CUDA_VERSION=11.8.0 ARG CUDNN_VERSION=8.9.4 +ARG GCC_VERSION=10 + ARG OCAML_VERSION=4 ARG OPAM_VERSION=2 ARG TORCH_VARIANT ARG TORCH_VERSION=2.1.1 -RUN apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - build-essential \ - cmake \ +COPY .dev /opt/container + +RUN /opt/container/install_build_deps.sh $GCC_VERSION \ && apt-get satisfy -y "ocaml (>= $OCAML_VERSION)" "opam (>= $OPAM_VERSION)" \ && rm -rf /var/lib/apt/lists/* @@ -29,36 +30,12 @@ ENV PATH=/opt/juliaup/bin:$PATH ENV CUDA_VERSION=$CUDA_VERSION ENV CUDNN_VERSION=$CUDNN_VERSION -RUN <<EOF -CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2) -TMP_PROJECT=$(mktemp -d) -cd $TMP_PROJECT -touch Project.toml -cat <<EOT > LocalPreferences.toml -[CUDA_Runtime_jll] -version = "$CUDA_VERSION_MAJOR_MINOR" -EOT -CUDA_ROOT=$(julia --project --eval ' - using Pkg - CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) - CUDA_SDK_jll_pkg = :CUDA_SDK_jll - if CUDA_VERSION < v"11.4" - CUDA_SDK_jll_pkg = :CUDA_full_jll - end - Pkg.add(name=string(CUDA_SDK_jll_pkg), version=ENV["CUDA_VERSION"]) - @eval using $CUDA_SDK_jll_pkg - println(@eval $CUDA_SDK_jll_pkg.artifact_dir) -') -ln -s $CUDA_ROOT/cuda /usr/local/cuda -EOF +RUN /opt/container/install_cuda_sdk.sh $CUDA_VERSION ENV PATH=$PATH:/usr/local/cuda/bin SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN if [ -z "$TORCH_VARIANT" ]; then export TORCH_VARIANT="cu$(echo $CUDA_VERSION | cut -d . -f 1-2 | tr -d '.')"; fi \ - && cd /usr/local \ - && wget -q "https://download.pytorch.org/libtorch/$TORCH_VARIANT/libtorch-cxx11-abi-shared-with-deps-$TORCH_VERSION%2B$TORCH_VARIANT.zip" \ - && unzip -q libtorch-*.zip \ - && rm libtorch-*.zip +RUN export TORCH_VARIANT="cu$(echo $CUDA_VERSION | cut -d .
-f 1-2 | tr -d '.')" \ + && /opt/container/install_torch.sh $TORCH_VARIANT $TORCH_VERSION ENV CMAKE_PREFIX_PATH=/usr/local/libtorch diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1fdda147..06c9df23 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,11 +1,13 @@ { "build": { + "context": "..", "dockerfile": "Dockerfile", "args": { "BASE_IMAGE_VARIANT": "debian", "BASE_IMAGE_VERSION": "11", "CUDA_VERSION": "11.3.1", "CUDNN_VERSION": "8.2.4", + "GCC_VERSION": "10", "OCAML_VERSION": "4", "OPAM_VERSION": "2", "TORCH_VERSION": "1.10.2" diff --git a/.devcontainer/postCreate.sh b/.devcontainer/postCreate.sh index 777e36c2..81b5b3d6 100755 --- a/.devcontainer/postCreate.sh +++ b/.devcontainer/postCreate.sh @@ -1,27 +1,5 @@ sudo chown -R vscode:vscode /opt/juliaup /opt/julia_depot -CUDA_VERSION_MAJOR_MINOR=$(echo $CUDA_VERSION | cut -d . -f 1-2) -TMP_PROJECT=$(mktemp -d) -cd $TMP_PROJECT -touch Project.toml -cat <<EOT > LocalPreferences.toml -[CUDA_Runtime_jll] -version = "$CUDA_VERSION_MAJOR_MINOR" -EOT -CUDNN_ROOT=$(julia --project --eval ' - using Pkg; - CUDA_VERSION = VersionNumber(ENV["CUDA_VERSION"]) - if CUDA_VERSION < v"11" - Pkg.add(name="CUDA_Runtime_jll", version="0.2") - elseif CUDA_VERSION < v"11.4" - Pkg.add(name="CUDA_Runtime_jll", version="0.7") - else - Pkg.add(name="CUDA_Runtime_jll") - end - Pkg.add(name="CUDNN_jll", version=ENV["CUDNN_VERSION"]); - using CUDNN_jll; - println(CUDNN_jll.artifact_dir)') \ -&& for F in $CUDNN_ROOT/include/cudnn*.h; do ln -s $F /usr/local/cuda/include/$(basename $F); done \ -&& for F in $CUDNN_ROOT/lib/libcudnn*; do ln -s $F /usr/local/cuda/lib64/$(basename $F); done +./.dev/install_cudnn.sh $CUDA_VERSION $CUDNN_VERSION opam init --disable-sandboxing --auto-setup diff --git a/.github/workflows/BuildCWrapper.yaml b/.github/workflows/BuildCWrapper.yaml new file mode 100644 index 00000000..39bf61a9 --- /dev/null +++ b/.github/workflows/BuildCWrapper.yaml @@ -0,0 +1,97 @@ +name: Build C Wrapper + +on: + push: + branches: + - master + tags: ['*'] + pull_request: + workflow_dispatch: + +concurrency: + # Skip intermediate builds: always. + # Cancel intermediate builds: only if it is a pull request build. + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} + +jobs: + build_cpu: + name: Build (CPU) + runs-on: ubuntu-latest + timeout-minutes: 60 + permissions: # needed to allow julia-actions/cache to proactively delete old caches that it has created + actions: write + contents: read + container: + image: debian:10 + env: + GCC_VERSION: "8" + TORCH_VARIANT: cpu + TORCH_VERSION: "1.10.2" + USE_CUDA: "OFF" + steps: + - uses: actions/checkout@v4 + - run: ./.dev/install_build_deps.sh $GCC_VERSION + - name: Install Torch + run: ./.dev/install_torch.sh $TORCH_VARIANT $TORCH_VERSION + - name: Build + run: | + export CMAKE_PREFIX_PATH=/usr/local/libtorch + cd deps/c_wrapper + cmake -S .
-B build -DUSE_CUDA=$USE_CUDA + cmake --build build + build_cuda: + name: Build (CUDA ${{ matrix.cuda_version }}, CUDNN ${{ matrix.cudnn_version }}, GCC ${{ matrix.gcc_version }}, Torch ${{ matrix.torch_version }}) + runs-on: ubuntu-latest + timeout-minutes: 60 + permissions: # needed to allow julia-actions/cache to proactively delete old caches that it has created + actions: write + contents: read + strategy: + fail-fast: false + matrix: + arch: + - x64 + cuda_version: + - "10.2.89" + - "11.3.1" + cudnn_version: + - "8.2.4" + gcc_version: + - "8" + julia_version: + - "1.9" + os_version: + - "10" + torch_version: + - "1.10.2" + container: + image: debian:${{ matrix.os_version }} + env: + CUDA_VERSION: ${{ matrix.cuda_version }} + CUDNN_VERSION: ${{ matrix.cudnn_version }} + GCC_VERSION: ${{ matrix.gcc_version }} + TORCH_VERSION: ${{ matrix.torch_version }} + USE_CUDA: "ON" + steps: + - uses: actions/checkout@v4 + - run: ./.dev/install_build_deps.sh $GCC_VERSION + - uses: julia-actions/setup-julia@v2 + with: + version: ${{ matrix.julia_version }} + arch: ${{ matrix.arch }} + - uses: julia-actions/cache@v1 + - name: Install CUDA SDK ${{ matrix.cuda_version }} and CUDNN ${{ matrix.cudnn_version }} + run: | + ./.dev/install_cuda_sdk.sh $CUDA_VERSION + ./.dev/install_cudnn.sh $CUDA_VERSION $CUDNN_VERSION + - name: Install Torch + run: | + export TORCH_VARIANT="cu$(echo $CUDA_VERSION | cut -d . -f 1-2 | tr -d '.')" + ./.dev/install_torch.sh $TORCH_VARIANT $TORCH_VERSION + - name: Build + run: | + export CMAKE_PREFIX_PATH=/usr/local/libtorch + cd deps/c_wrapper + cmake -S . -B build -DUSE_CUDA=$USE_CUDA + cmake --build build From 37c605640aa4fbfc681cce7fe8e3e8b668098a2d Mon Sep 17 00:00:00 2001 From: Jesper Stemann Andersen Date: Mon, 29 Apr 2024 12:59:12 +0200 Subject: [PATCH 12/12] Fixed C wrapper wrt. 
formatting --- deps/c_wrapper/torch_api_generated.cpp.h | 10918 ++++++++++----------- deps/c_wrapper/torch_api_generated.h | 29 - deps/c_wrapper_generator/bin/main.ml | 17 +- 3 files changed, 5436 insertions(+), 5528 deletions(-) diff --git a/deps/c_wrapper/torch_api_generated.cpp.h b/deps/c_wrapper/torch_api_generated.cpp.h index e4a649b4..b38f24fd 100644 --- a/deps/c_wrapper/torch_api_generated.cpp.h +++ b/deps/c_wrapper/torch_api_generated.cpp.h @@ -4,270 +4,270 @@ int atg___and__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::__and__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___and__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::__and__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___iand__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->__iand__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___iand__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->__iand__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ilshift__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->__ilshift__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ilshift__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->__ilshift__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ior__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->__ior__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ior__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->__ior__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___irshift__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->__irshift__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___irshift__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->__irshift__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ixor__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->__ixor__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___ixor__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->__ixor__(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___lshift__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::__lshift__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___lshift__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::__lshift__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___or__(tensor *out__, tensor self, scalar other) { 
PROTECT( auto outputs__ = torch::__or__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___or__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::__or__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___rshift__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::__rshift__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___rshift__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::__rshift__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___xor__(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::__xor__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg___xor__tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::__xor__(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__adaptive_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::_adaptive_avg_pool2d_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::_adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::_adaptive_avg_pool3d_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__add_batch_dim(tensor *out__, tensor self, int64_t batch_dim, int64_t level) { PROTECT( auto outputs__ = torch::_add_batch_dim(*self, batch_dim, level); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__add_relu(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::_add_relu(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__add_relu_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::_add_relu_(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__add_relu_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::_add_relu_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__add_relu_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::_add_relu(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 
0; -) -return 1; + return 0; + ) + return 1; } int atg__add_relu_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::_add_relu_(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__aminmax(tensor *out__, tensor self) { @@ -275,9 +275,9 @@ int atg__aminmax(tensor *out__, tensor self) { auto outputs__ = torch::_aminmax(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__aminmax_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { @@ -285,198 +285,198 @@ int atg__aminmax_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { auto outputs__ = torch::_aminmax(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__amp_update_scale_(tensor *out__, tensor self, tensor growth_tracker, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { PROTECT( auto outputs__ = torch::_amp_update_scale_(*self, *growth_tracker, *found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__baddbmm_mkl_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::_baddbmm_mkl_(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_byte(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Byte(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_char(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Char(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_double(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Double(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_float(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Float(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_half(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Half(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_int(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Int(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_long(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Long(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cast_short(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Short(*self, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) 
+ return 1; } int atg__cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::_cat(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::_cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cdist_backward(tensor *out__, tensor grad, tensor x1, tensor x2, double p, tensor cdist) { PROTECT( auto outputs__ = torch::_cdist_backward(*grad, *x1, *x2, p, *cdist); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__cholesky_solve_helper(tensor *out__, tensor self, tensor A, int upper) { PROTECT( auto outputs__ = torch::_cholesky_solve_helper(*self, *A, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__coalesce(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_coalesce(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__coalesced_(tensor *out__, tensor self, int coalesced) { PROTECT( auto outputs__ = self->_coalesced_((bool)coalesced); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__compute_linear_combination(tensor *out__, tensor input, tensor coefficients) { PROTECT( auto outputs__ = torch::_compute_linear_combination(*input, *coefficients); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__compute_linear_combination_out(tensor *out__, tensor out, tensor input, tensor coefficients) { PROTECT( auto outputs__ = torch::_compute_linear_combination_out(*out, *input, *coefficients); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__conj(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_conj(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__conj_physical(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_conj_physical(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg__conv_depthwise2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::_conv_depthwise2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__conv_depthwise2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
@@ -484,90 +484,90 @@ int atg__conv_depthwise2d_backward(tensor *out__, tensor grad_input, tensor grad
     auto outputs__ = torch::_conv_depthwise2d_backward_out(*grad_input, *grad_weight, *grad_output, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__conv_depthwise2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
   PROTECT(
     auto outputs__ = torch::_conv_depthwise2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convert_indices_from_coo_to_csr(tensor *out__, tensor self, int64_t size, int out_int32) {
   PROTECT(
     auto outputs__ = torch::_convert_indices_from_coo_to_csr(*self, size, (bool)out_int32);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convert_indices_from_coo_to_csr_out(tensor *out__, tensor out, tensor self, int64_t size, int out_int32) {
   PROTECT(
     auto outputs__ = torch::_convert_indices_from_coo_to_csr_out(*out, *self, size, (bool)out_int32);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32) {
   PROTECT(
     auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled, (bool)allow_tf32);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convolution_deprecated(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled) {
   PROTECT(
     auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convolution_mode(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) {
   PROTECT(
     auto outputs__ = torch::_convolution_mode(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__convolution_nogroup(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len) {
   PROTECT(
     auto outputs__ = torch::_convolution_nogroup(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__copy_from(tensor *out__, tensor self, tensor dst, int non_blocking) {
   PROTECT(
     auto outputs__ = torch::_copy_from(*self, *dst, (bool)non_blocking);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__copy_from_and_resize(tensor *out__, tensor self, tensor dst) {
   PROTECT(
     auto outputs__ = torch::_copy_from_and_resize(*self, *dst);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int zero_infinity) {
@@ -575,18 +575,18 @@ int atg__ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *inpu
     auto outputs__ = torch::_ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, (bool)zero_infinity);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__ctc_loss_backward(tensor *out__, tensor grad, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, tensor neg_log_likelihood, tensor log_alpha, int64_t blank, int zero_infinity) {
   PROTECT(
     auto outputs__ = torch::_ctc_loss_backward(*grad, *log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), *neg_log_likelihood, *log_alpha, blank, (bool)zero_infinity);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__cudnn_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int deterministic, int zero_infinity) {
@@ -594,18 +594,18 @@ int atg__cudnn_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t
     auto outputs__ = torch::_cudnn_ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, (bool)deterministic, (bool)zero_infinity);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__cudnn_init_dropout_state(tensor *out__, double dropout, int train, int64_t dropout_seed, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_cudnn_init_dropout_state(dropout, (bool)train, dropout_seed, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__cudnn_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor weight_buf, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) {
@@ -616,18 +616,18 @@ int atg__cudnn_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
     out__[4] = new torch::Tensor(std::get<4>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__cudnn_rnn_flatten_weight(tensor *out__, tensor *weight_arr_data, int weight_arr_len, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, int batch_first, int bidirectional) {
   PROTECT(
     auto outputs__ = torch::_cudnn_rnn_flatten_weight(of_carray_tensor(weight_arr_data, weight_arr_len), weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, (bool)batch_first, (bool)bidirectional);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__det_lu_based_helper(tensor *out__, tensor self) {
@@ -636,36 +636,36 @@ int atg__det_lu_based_helper(tensor *out__, tensor self) {
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__det_lu_based_helper_backward_helper(tensor *out__, tensor det_grad, tensor det, tensor self, tensor lu, tensor pivs) {
   PROTECT(
     auto outputs__ = torch::_det_lu_based_helper_backward_helper(*det_grad, *det, *self, *lu, *pivs);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__dim_arange(tensor *out__, tensor like, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_dim_arange(*like, dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__dirichlet_grad(tensor *out__, tensor x, tensor alpha, tensor total) {
   PROTECT(
     auto outputs__ = torch::_dirichlet_grad(*x, *alpha, *total);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) {
@@ -675,27 +675,27 @@ int atg__embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offs
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag_backward(tensor *out__, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
     auto outputs__ = torch::_embedding_bag_backward(*grad, *indices, *offsets, *offset2bag, *bag_size, *maximum_indices, num_weights, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag_dense_backward(tensor *out__, tensor grad, tensor indices, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
     auto outputs__ = torch::_embedding_bag_dense_backward(*grad, *indices, *offset2bag, *bag_size, *maximum_indices, num_weights, (bool)scale_grad_by_freq, mode, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag_forward_only(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) {
@@ -705,63 +705,63 @@ int atg__embedding_bag_forward_only(tensor *out__, tensor weight, tensor indices
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag_per_sample_weights_backward(tensor *out__, tensor grad, tensor weight, tensor indices, tensor offsets, tensor offset2bag, int64_t mode, int64_t padding_idx) {
   PROTECT(
     auto outputs__ = torch::_embedding_bag_per_sample_weights_backward(*grad, *weight, *indices, *offsets, *offset2bag, mode, padding_idx);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__embedding_bag_sparse_backward(tensor *out__, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights, int64_t padding_idx) {
   PROTECT(
     auto outputs__ = torch::_embedding_bag_sparse_backward(*grad, *indices, *offsets, *offset2bag, *bag_size, num_weights, (bool)scale_grad_by_freq, mode, (per_sample_weights ? *per_sample_weights : torch::Tensor()), padding_idx);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__empty_affine_quantized(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device, double scale, int64_t zero_point) {
   PROTECT(
     auto outputs__ = torch::_empty_affine_quantized(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), scale, zero_point);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__empty_per_channel_affine_quantized(tensor *out__, int64_t *size_data, int size_len, tensor scales, tensor zero_points, int64_t axis, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_empty_per_channel_affine_quantized(torch::IntArrayRef(size_data, size_len), *scales, *zero_points, axis, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__euclidean_dist(tensor *out__, tensor x1, tensor x2) {
   PROTECT(
     auto outputs__ = torch::_euclidean_dist(*x1, *x2);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fake_quantize_learnable_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
   PROTECT(
     auto outputs__ = torch::_fake_quantize_learnable_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max, grad_factor);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fake_quantize_learnable_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
@@ -770,18 +770,18 @@ int atg__fake_quantize_learnable_per_channel_affine_backward(tensor *out__, tens
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fake_quantize_learnable_per_tensor_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
   PROTECT(
     auto outputs__ = torch::_fake_quantize_learnable_per_tensor_affine(*self, *scale, *zero_point, quant_min, quant_max, grad_factor);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
@@ -790,9 +790,9 @@ int atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *out__, tenso
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, tensor fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
@@ -800,63 +800,63 @@ int atg__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(tensor *out__,
     auto outputs__ = torch::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(*self, *scale, *zero_point, *fake_quant_enabled, quant_min, quant_max);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_c2c(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward) {
   PROTECT(
     auto outputs__ = torch::_fft_c2c(*self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)forward);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_c2c_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int forward) {
   PROTECT(
     auto outputs__ = torch::_fft_c2c_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)forward);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_c2r(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size) {
   PROTECT(
     auto outputs__ = torch::_fft_c2r(*self, torch::IntArrayRef(dim_data, dim_len), normalization, last_dim_size);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_c2r_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int64_t last_dim_size) {
   PROTECT(
     auto outputs__ = torch::_fft_c2r_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, last_dim_size);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_r2c(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided) {
   PROTECT(
     auto outputs__ = torch::_fft_r2c(*self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)onesided);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fft_r2c_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t normalization, int onesided) {
   PROTECT(
     auto outputs__ = torch::_fft_r2c_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), normalization, (bool)onesided);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fused_dropout(tensor *out__, tensor self, double p) {
@@ -864,9 +864,9 @@ int atg__fused_dropout(tensor *out__, tensor self, double p) {
     auto outputs__ = torch::_fused_dropout(*self, p);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fused_moving_avg_obs_fq_helper(tensor *out__, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant) {
@@ -874,36 +874,36 @@ int atg__fused_moving_avg_obs_fq_helper(tensor *out__, tensor self, tensor obser
     auto outputs__ = torch::_fused_moving_avg_obs_fq_helper(*self, *observer_on, *fake_quant_on, *running_min, *running_max, *scale, *zero_point, averaging_const, quant_min, quant_max, ch_axis, (bool)per_row_fake_quant, (bool)symmetric_quant);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__fw_primal(tensor *out__, tensor self, int64_t level) {
   PROTECT(
     auto outputs__ = self->_fw_primal(level);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__gather_sparse_backward(tensor *out__, tensor self, int64_t dim, tensor index, tensor grad) {
   PROTECT(
     auto outputs__ = torch::_gather_sparse_backward(*self, dim, *index, *grad);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__grid_sampler_2d_cpu_fallback(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
   PROTECT(
     auto outputs__ = torch::_grid_sampler_2d_cpu_fallback(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__grid_sampler_2d_cpu_fallback_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
@@ -911,54 +911,54 @@ int atg__grid_sampler_2d_cpu_fallback_backward(tensor *out__, tensor grad_output
     auto outputs__ = torch::_grid_sampler_2d_cpu_fallback_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
   PROTECT(
     auto outputs__ = torch::_index_copy_(*self, dim, *index, *source);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__index_put_impl_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate, int unsafe) {
   PROTECT(
     auto outputs__ = torch::_index_put_impl_(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate, (bool)unsafe);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__indices(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = self->_indices();
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__inverse_helper(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_inverse_helper(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__linalg_inv_out_helper_(tensor *out__, tensor self, tensor infos_lu, tensor infos_getri) {
   PROTECT(
     auto outputs__ = torch::_linalg_inv_out_helper_(*self, *infos_lu, *infos_getri);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__linalg_qr_helper(tensor *out__, tensor self, char * mode) {
@@ -966,63 +966,63 @@
     auto outputs__ = torch::_linalg_qr_helper(*self, std::string(mode));
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_log_softmax(*self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_log_softmax_backward_data(*grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__log_softmax_backward_data_out(tensor *out__, tensor out, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_log_softmax_backward_data_out(*out, *grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__log_softmax_out(tensor *out__, tensor out, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_log_softmax_out(*out, *self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__logcumsumexp(tensor *out__, tensor self, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_logcumsumexp(*self, dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_logcumsumexp_out(*out, *self, dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__lu_with_info(tensor *out__, tensor self, int pivot, int check_errors) {
@@ -1031,108 +1031,108 @@ int atg__lu_with_info(tensor *out__, tensor self, int pivot, int check_errors) {
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__make_dual(tensor *out__, tensor primal, tensor tangent, int64_t level) {
   PROTECT(
     auto outputs__ = torch::_make_dual(*primal, *tangent, level);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__make_per_channel_quantized_tensor(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis) {
   PROTECT(
     auto outputs__ = torch::_make_per_channel_quantized_tensor(*self, *scale, *zero_point, axis);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__make_per_tensor_quantized_tensor(tensor *out__, tensor self, double scale, int64_t zero_point) {
   PROTECT(
     auto outputs__ = torch::_make_per_tensor_quantized_tensor(*self, scale, zero_point);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__masked_scale(tensor *out__, tensor self, tensor mask, double scale) {
   PROTECT(
     auto outputs__ = torch::_masked_scale(*self, *mask, scale);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__mkldnn_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
   PROTECT(
     auto outputs__ = torch::_mkldnn_reshape(*self, torch::IntArrayRef(shape_data, shape_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__mkldnn_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
     auto outputs__ = torch::_mkldnn_transpose(*self, dim0, dim1);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__mkldnn_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) {
   PROTECT(
     auto outputs__ = torch::_mkldnn_transpose_(*self, dim0, dim1);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__neg_view(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_neg_view(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__nnpack_spatial_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
   PROTECT(
     auto outputs__ = torch::_nnpack_spatial_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__nnpack_spatial_convolution_backward_input(tensor *out__, tensor input, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::_nnpack_spatial_convolution_backward_input(*input, *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__nnpack_spatial_convolution_backward_weight(tensor *out__, tensor input, int64_t *weightsize_data, int weightsize_len, tensor grad_output, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::_nnpack_spatial_convolution_backward_weight(*input, torch::IntArrayRef(weightsize_data, weightsize_len), *grad_output, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__pack_padded_sequence(tensor *out__, tensor input, tensor lengths, int batch_first) {
@@ -1140,18 +1140,18 @@ int atg__pack_padded_sequence(tensor *out__, tensor input, tensor lengths, int b
     auto outputs__ = torch::_pack_padded_sequence(*input, *lengths, (bool)batch_first);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__pack_padded_sequence_backward(tensor *out__, tensor grad, int64_t *input_size_data, int input_size_len, tensor batch_sizes, int batch_first) {
   PROTECT(
     auto outputs__ = torch::_pack_padded_sequence_backward(*grad, torch::IntArrayRef(input_size_data, input_size_len), *batch_sizes, (bool)batch_first);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__pad_packed_sequence(tensor *out__, tensor data, tensor batch_sizes, int batch_first, scalar padding_value, int64_t total_length) {
@@ -1159,54 +1159,54 @@ int atg__pad_packed_sequence(tensor *out__, tensor data, tensor batch_sizes, int
     auto outputs__ = torch::_pad_packed_sequence(*data, *batch_sizes, (bool)batch_first, *padding_value, total_length);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__pdist_backward(tensor *out__, tensor grad, tensor self, double p, tensor pdist) {
   PROTECT(
     auto outputs__ = torch::_pdist_backward(*grad, *self, p, *pdist);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__pin_memory(tensor *out__, tensor self, int device) {
   PROTECT(
     auto outputs__ = torch::_pin_memory(*self, device_of_int(device));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__remove_batch_dim(tensor *out__, tensor self, int64_t level, int64_t batch_size, int64_t out_dim) {
   PROTECT(
     auto outputs__ = torch::_remove_batch_dim(*self, level, batch_size, out_dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__reshape_alias(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len) {
   PROTECT(
     auto outputs__ = torch::_reshape_alias(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__reshape_from_tensor(tensor *out__, tensor self, tensor shape) {
   PROTECT(
     auto outputs__ = torch::_reshape_from_tensor(*self, *shape);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__rowwise_prune(tensor *out__, tensor weight, tensor mask, int compressed_indices_dtype) {
@@ -1214,54 +1214,54 @@ int atg__rowwise_prune(tensor *out__, tensor weight, tensor mask, int compressed
     auto outputs__ = torch::_rowwise_prune(*weight, *mask, torch::ScalarType(compressed_indices_dtype));
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__s_where(tensor *out__, tensor condition, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::_s_where(*condition, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sample_dirichlet(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_sample_dirichlet(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__saturate_weight_to_fp16(tensor *out__, tensor weight) {
   PROTECT(
     auto outputs__ = torch::_saturate_weight_to_fp16(*weight);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__segment_reduce_backward(tensor *out__, tensor grad, tensor output, tensor data, char * reduce, tensor lengths, int64_t axis) {
   PROTECT(
     auto outputs__ = torch::_segment_reduce_backward(*grad, *output, *data, std::string(reduce), (lengths ? *lengths : torch::Tensor()), axis);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__shape_as_tensor(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_shape_as_tensor(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__slow_conv2d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, tensor finput) {
@@ -1270,9 +1270,9 @@ int atg__slow_conv2d_backward(tensor *out__, tensor grad_input, tensor grad_weig
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sobol_engine_draw(tensor *out__, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype) {
@@ -1280,72 +1280,72 @@ int atg__sobol_engine_draw(tensor *out__, tensor quasi, int64_t n, tensor sobols
     auto outputs__ = torch::_sobol_engine_draw(*quasi, n, *sobolstate, dimension, num_generated, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sobol_engine_ff_(tensor *out__, tensor self, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated) {
   PROTECT(
     auto outputs__ = torch::_sobol_engine_ff_(*self, n, *sobolstate, dimension, num_generated);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sobol_engine_initialize_state_(tensor *out__, tensor self, int64_t dimension) {
   PROTECT(
     auto outputs__ = torch::_sobol_engine_initialize_state_(*self, dimension);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sobol_engine_scramble_(tensor *out__, tensor self, tensor ltm, int64_t dimension) {
   PROTECT(
     auto outputs__ = torch::_sobol_engine_scramble_(*self, *ltm, dimension);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_softmax(*self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_softmax_backward_data(*grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
  +  )
+  return 1;
 }
 
 int atg__softmax_backward_data_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_softmax_backward_data_out(*grad_input, *grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__softmax_out(tensor *out__, tensor out, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_softmax_out(*out, *self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
  +  )
+  return 1;
 }
 
 int atg__solve_helper(tensor *out__, tensor self, tensor A) {
@@ -1353,216 +1353,216 @@ int atg__solve_helper(tensor *out__, tensor self, tensor A) {
     auto outputs__ = torch::_solve_helper(*self, *A);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_addmm(tensor *out__, tensor self, tensor sparse, tensor dense) {
   PROTECT(
     auto outputs__ = torch::_sparse_addmm(*self, *sparse, *dense);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_coo_tensor_unsafe(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_sparse_coo_tensor_unsafe(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_coo_tensor_with_dims(tensor *out__, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_coo_tensor_with_dims_and_tensors(tensor *out__, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, tensor indices, tensor values, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, torch::IntArrayRef(size_data, size_len), *indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_csr_tensor_unsafe(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::_sparse_csr_tensor_unsafe(*crow_indices, *col_indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_log_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_sparse_log_softmax(*self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_sparse_log_softmax_backward_data(*grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_log_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) {
   PROTECT(
     auto outputs__ = torch::_sparse_log_softmax(*self, dim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_mask_helper(tensor *out__, tensor t, tensor mask_indices) {
   PROTECT(
     auto outputs__ = torch::_sparse_mask_helper(*t, *mask_indices);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_mm(tensor *out__, tensor sparse, tensor dense) {
   PROTECT(
     auto outputs__ = torch::_sparse_mm(*sparse, *dense);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_softmax(tensor *out__, tensor self, int64_t dim, int half_to_float) {
   PROTECT(
     auto outputs__ = torch::_sparse_softmax(*self, dim, (bool)half_to_float);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) {
   PROTECT(
     auto outputs__ = torch::_sparse_softmax_backward_data(*grad_output, *output, dim, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_softmax_int(tensor *out__, tensor self, int64_t dim, int dtype) {
   PROTECT(
     auto outputs__ = torch::_sparse_softmax(*self, dim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sparse_matmul(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::_sparse_sparse_matmul(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sum(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_sparse_sum(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sum_backward(tensor *out__, tensor grad, tensor self, int64_t *dim_data, int dim_len) {
   PROTECT(
     auto outputs__ = torch::_sparse_sum_backward(*grad, *self, torch::IntArrayRef(dim_data, dim_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sum_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len) {
   PROTECT(
     auto outputs__ = torch::_sparse_sum(*self, torch::IntArrayRef(dim_data, dim_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sum_dim_dtype(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int dtype) {
   PROTECT(
     auto outputs__ = torch::_sparse_sum(*self, torch::IntArrayRef(dim_data, dim_len), torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__sparse_sum_dtype(tensor *out__, tensor self, int dtype) {
   PROTECT(
     auto outputs__ = torch::_sparse_sum(*self, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_stack(of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__standard_gamma(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::_standard_gamma(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__standard_gamma_grad(tensor *out__, tensor self, tensor output) {
   PROTECT(
     auto outputs__ = torch::_standard_gamma_grad(*self, *output);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__svd_helper(tensor *out__, tensor self, int some, int compute_uv) {
@@ -1571,9 +1571,9 @@ int atg__svd_helper(tensor *out__, tensor self, int some, int compute_uv) {
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__symeig_helper(tensor *out__, tensor self, int eigenvectors, int upper) {
@@ -1581,63 +1581,63 @@ int atg__symeig_helper(tensor *out__, tensor self, int eigenvectors, int upper)
     auto outputs__ = torch::_symeig_helper(*self, (bool)eigenvectors, (bool)upper);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_ambiguous_defaults(tensor *out__, tensor dummy, int64_t a, int64_t b) {
   PROTECT(
     auto outputs__ = torch::_test_ambiguous_defaults(*dummy, a, b);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_ambiguous_defaults_b(tensor *out__, tensor dummy, int64_t a, char * b) {
   PROTECT(
     auto outputs__ = torch::_test_ambiguous_defaults(*dummy, a, std::string(b));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_optional_filled_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) {
   PROTECT(
     auto outputs__ = torch::_test_optional_filled_intlist(*values, torch::IntArrayRef(addends_data, addends_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_optional_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) {
   PROTECT(
     auto outputs__ = torch::_test_optional_intlist(*values, torch::IntArrayRef(addends_data, addends_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_serialization_subcmul(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::_test_serialization_subcmul(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__test_string_default(tensor *out__, tensor dummy, char * a, char * b) {
   PROTECT(
     auto outputs__ = torch::_test_string_default(*dummy, std::string(a), std::string(b));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_differentiable_gru_cell_backward(tensor *out__, tensor grad_hy, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias) {
@@ -1648,9 +1648,9 @@ int atg__thnn_differentiable_gru_cell_backward(tensor *out__, tensor grad_hy, te
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
     out__[4] = new torch::Tensor(std::get<4>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_differentiable_lstm_cell_backward(tensor *out__, tensor grad_hy, tensor grad_cy, tensor input_gates, tensor hidden_gates, tensor input_bias, tensor hidden_bias, tensor cx, tensor cy) {
@@ -1661,9 +1661,9 @@ int atg__thnn_differentiable_lstm_cell_backward(tensor *out__, tensor grad_hy, t
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
     out__[4] = new torch::Tensor(std::get<4>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_fused_gru_cell(tensor *out__, tensor input_gates, tensor hidden_gates, tensor hx, tensor input_bias, tensor hidden_bias) {
@@ -1671,9 +1671,9 @@ int atg__thnn_fused_gru_cell(tensor *out__, tensor input_gates, tensor hidden_ga
     auto outputs__ = torch::_thnn_fused_gru_cell(*input_gates, *hidden_gates, *hx, (input_bias ? *input_bias : torch::Tensor()), (hidden_bias ? *hidden_bias : torch::Tensor()));
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_fused_gru_cell_backward(tensor *out__, tensor grad_hy, tensor workspace, int has_bias) {
@@ -1684,9 +1684,9 @@ int atg__thnn_fused_gru_cell_backward(tensor *out__, tensor grad_hy, tensor work
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
     out__[4] = new torch::Tensor(std::get<4>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_fused_lstm_cell(tensor *out__, tensor input_gates, tensor hidden_gates, tensor cx, tensor input_bias, tensor hidden_bias) {
@@ -1695,9 +1695,9 @@ int atg__thnn_fused_lstm_cell(tensor *out__, tensor input_gates, tensor hidden_g
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__thnn_fused_lstm_cell_backward(tensor *out__, tensor grad_hy, tensor grad_cy, tensor cx, tensor cy, tensor workspace, int has_bias) {
@@ -1708,41 +1708,39 @@ int atg__thnn_fused_lstm_cell_backward(tensor *out__, tensor grad_hy, tensor gra
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
     out__[3] = new torch::Tensor(std::get<3>(outputs__));
     out__[4] = new torch::Tensor(std::get<4>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__to_copy(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking) {
   PROTECT(
     auto outputs__ = torch::_to_copy(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__to_cpu(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
     auto outputs__ = torch::_to_cpu(of_carray_tensor(tensors_data, tensors_len));
     int sz = outputs__.size();
-  // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
     for (int i = 0; i < sz; ++i)
       out__[i] = new torch::Tensor(outputs__[i]);
     out__[sz] = nullptr;
-  // return out__;
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__trilinear(tensor *out__, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim) {
   PROTECT(
     auto outputs__ = torch::_trilinear(*i1, *i2, *i3, torch::IntArrayRef(expand1_data, expand1_len), torch::IntArrayRef(expand2_data, expand2_len), torch::IntArrayRef(expand3_data, expand3_len), torch::IntArrayRef(sumdim_data, sumdim_len), unroll_dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__unique(tensor *out__, tensor self, int sorted, int return_inverse) {
@@ -1750,9 +1748,9 @@ int atg__unique(tensor *out__, tensor self, int sorted, int return_inverse) {
     auto outputs__ = torch::_unique(*self, (bool)sorted, (bool)return_inverse);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__unique2(tensor *out__, tensor self, int sorted, int return_inverse, int return_counts) {
@@ -1761,9 +1759,9 @@ int atg__unique2(tensor *out__, tensor self, int sorted, int return_inverse, int
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
     out__[2] = new torch::Tensor(std::get<2>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__unpack_dual(tensor *out__, tensor dual, int64_t level) {
@@ -1771,36 +1769,36 @@
     auto outputs__ = torch::_unpack_dual(*dual, level);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__unsafe_view(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::_unsafe_view(*self, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__values(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = self->_values();
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__weight_norm(tensor *out__, tensor v, tensor g, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::_weight_norm(*v, *g, dim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__weight_norm_cuda_interface(tensor *out__, tensor v, tensor g, int64_t dim) {
@@ -1808,9 +1806,9 @@ int atg__weight_norm_cuda_interface(tensor *out__, tensor v, tensor g, int64_t d
     auto outputs__ = torch::_weight_norm_cuda_interface(*v, *g, dim);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__weight_norm_cuda_interface_backward(tensor *out__, tensor grad_w, tensor saved_v, tensor saved_g, tensor saved_norms, int64_t dim) {
@@ -1818,9 +1816,9 @@ int atg__weight_norm_cuda_interface_backward(tensor *out__, tensor grad_w, tenso
     auto outputs__ = torch::_weight_norm_cuda_interface_backward(*grad_w, *saved_v, *saved_g, *saved_norms, dim);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg__weight_norm_differentiable_backward(tensor *out__, tensor grad_w, tensor saved_v, tensor saved_g, tensor saved_norms, int64_t dim) {
@@ -1828,171 +1826,171 @@ int atg__weight_norm_differentiable_backward(tensor *out__, tensor grad_w, tenso
     auto outputs__ = torch::_weight_norm_differentiable_backward(*grad_w, *saved_v, *saved_g, *saved_norms, dim);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_abs(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::abs(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_abs_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::abs_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_abs_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::abs_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_absolute(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::absolute(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_absolute_(tensor *out__, tensor self) {
   PROTECT(
    auto outputs__ = self->absolute_();
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_absolute_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::absolute_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acos(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::acos(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acos_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::acos_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acos_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::acos_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acosh(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::acosh(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acosh_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::acosh_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_acosh_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::acosh_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
     auto outputs__ = torch::adaptive_avg_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
     auto outputs__ = torch::adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_adaptive_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
     auto outputs__ = torch::adaptive_avg_pool2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
   PROTECT(
     auto outputs__ = torch::adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
  +  )
+  return 1;
 }
 
 int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_input, tensor grad_output, tensor self) {
self) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d_backward_out(*grad_input, *grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { @@ -2000,9 +1998,9 @@ int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_dat auto outputs__ = torch::adaptive_max_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { @@ -2010,27 +2008,27 @@ int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_dat auto outputs__ = torch::adaptive_max_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { @@ -2038,9 +2036,9 @@ int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tenso auto outputs__ = torch::adaptive_max_pool2d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { @@ -2048,27 +2046,27 @@ int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_dat auto outputs__ = torch::adaptive_max_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor 
grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { @@ -2076,356 +2074,354 @@ int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tenso auto outputs__ = torch::adaptive_max_pool3d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_add(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_add_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::add_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_add_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_add_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::addbmm(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = self->addbmm_(*batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::addbmm_out(*out, *self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcdiv(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = self->addcdiv_(*tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcdiv_out(*out, *self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcmul(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 
0; + ) + return 1; } int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = self->addcmul_(*tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcmul_out(*out, *self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::addmm(*self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = self->addmm_(*mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::addmm_out(*out, *self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = torch::addmv(*self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = torch::addmv_(*self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = torch::addmv_out(*out, *self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = torch::addr(*self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = self->addr_(*vec1, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = torch::addr_out(*out, *self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) { PROTECT( auto outputs__ = torch::affine_grid_generator(*theta, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size_data, int size_len, int align_corners) { PROTECT( auto outputs__ = torch::affine_grid_generator_backward(*grad, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_alias(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::alias(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_align_as(tensor *out__, tensor self, 
tensor other) { PROTECT( auto outputs__ = self->align_as(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::align_tensors(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_all(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::all(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_all_all_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::all_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_all_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::all(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::all_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::alpha_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::alpha_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_amax(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::amax(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_amax_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::amax_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_amin(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::amin(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_amin_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::amin_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_aminmax(tensor *out__, tensor self, int64_t dim, int keepdim) { @@ -2433,9 +2429,9 @@ int atg_aminmax(tensor *out__, tensor self, int64_t dim, int keepdim) { auto outputs__ = torch::aminmax(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 
0; + ) + return 1; } int atg_aminmax_out(tensor *out__, tensor min, tensor max, tensor self, int64_t dim, int keepdim) { @@ -2443,681 +2439,675 @@ int atg_aminmax_out(tensor *out__, tensor min, tensor max, tensor self, int64_t auto outputs__ = torch::aminmax_out(*min, *max, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_angle(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::angle(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_angle_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::angle_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_any(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::any(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_any_all_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::any_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_any_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::any(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::any_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arange_out(tensor *out__, tensor out, scalar end) { PROTECT( auto outputs__ = torch::arange_out(*out, *end); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arange_start(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arange_start_out(tensor *out__, tensor out, scalar start, scalar end) { PROTECT( auto outputs__ = torch::arange_out(*out, *start, *end); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arange_start_step(tensor *out__, scalar start, scalar end, scalar step, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*start, *end, *step, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arccos(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arccos(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arccos_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arccos_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) 
-return 1; + return 0; + ) + return 1; } int atg_arccos_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arccos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arccosh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arccosh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arccosh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arccosh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arccosh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arccosh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsin(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arcsin(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsin_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arcsin_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsin_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arcsin_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsinh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arcsinh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsinh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arcsinh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arcsinh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arcsinh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arctan(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctan_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arctan_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctan_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arctan_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arctanh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctanh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::arctanh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_arctanh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::arctanh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmax(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_argmax_out(tensor *out__, tensor out, tensor 
self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmax_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmin(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_argmin_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmin_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) { PROTECT( auto outputs__ = torch::argsort(*self, dim, (bool)descending); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { PROTECT( auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { PROTECT( auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asin(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asin(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asin_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asin_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asin_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::asin_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asinh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asinh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asinh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asinh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_asinh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::asinh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atan(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan2(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::atan2(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan2_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->atan2_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan2_out(tensor 
*out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::atan2_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atan_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atan_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::atan_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atanh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atanh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atanh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atanh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::atanh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_1d(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atleast_1d(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_1d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::atleast_1d(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_2d(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atleast_2d(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_2d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::atleast_2d(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_3d(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atleast_3d(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_atleast_3d_sequence(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::atleast_3d(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad) { PROTECT( auto outputs__ = torch::avg_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), 
torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int 
ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::baddbmm(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = self->baddbmm_(*batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::baddbmm_out(*out, *self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::bartlett_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bartlett_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::bartlett_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled) { PROTECT( auto outputs__ = torch::batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? 
*running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps, (bool)cudnn_enabled); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu, tensor count) { PROTECT( auto outputs__ = torch::batch_norm_backward_elemt(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), *mean_dy, *mean_dy_xmu, *count); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g) { @@ -3127,27 +3117,27 @@ int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { PROTECT( auto outputs__ = torch::batch_norm_elemt(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { PROTECT( auto outputs__ = torch::batch_norm_elemt_out(*out, *input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count) { @@ -3155,9 +3145,9 @@ int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor auto outputs__ = torch::batch_norm_gather_stats(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, count); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, tensor counts) { @@ -3165,9 +3155,9 @@ int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum, eps, *counts); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { @@ -3175,9 +3165,9 @@ int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { auto outputs__ = torch::batch_norm_stats(*input, eps); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean, tensor running_var, double momentum) { @@ -3185,716 +3175,714 @@ int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean auto outputs__ = torch::batch_norm_update_stats(*input, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bernoulli(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::bernoulli(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bernoulli_(tensor *out__, tensor self, tensor p) { PROTECT( auto outputs__ = self->bernoulli_(*p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bernoulli_float_(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = self->bernoulli_(p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bernoulli_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::bernoulli_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bernoulli_p(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = torch::bernoulli(*self, p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::bilinear(*input1, *input2, *weight, (bias ? *bias : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy(*self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_backward_out(*grad_input, *grad_output, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_with_logits(*self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_with_logits_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) { PROTECT( auto outputs__ = torch::bincount(*self, (weights ? *weights : torch::Tensor()), minlength); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_binomial(tensor *out__, tensor count, tensor prob) { PROTECT( auto outputs__ = torch::binomial(*count, *prob); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_and(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_and_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_and_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_and(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->bitwise_and_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_and_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_and_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_left_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_(tensor *out__, tensor 
self, tensor other) { PROTECT( auto outputs__ = self->bitwise_left_shift_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_scalar_tensor(tensor *out__, scalar self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_left_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_left_shift_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_tensor_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_left_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_tensor_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_left_shift_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_left_shift_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_left_shift_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_not(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::bitwise_not(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_not_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->bitwise_not_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::bitwise_not_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_or(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_or_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_or_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_or(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->bitwise_or_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_or_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_or_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift(tensor *out__, tensor self, tensor other) { 
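// Every wrapper in this generated file follows the same shape: results are
// written into the caller-allocated out__ array, the libtorch call runs
// inside PROTECT with `return 0;` as its final statement, and control only
// falls through to the trailing `return 1;` once an exception has been
// caught, so 0 signals success and 1 signals failure. A minimal sketch of
// the macro, assuming it is defined in torch_api.h and records the message
// in a torch_last_err-style slot (the header location and the slot name are
// assumptions, not the verbatim definition):
//
//   #define PROTECT(x)                        \
//     try {                                   \
//       x                                     \
//     } catch (const std::exception &e) {     \
//       torch_last_err = strdup(e.what());    \
//     }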
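// Wrappers that return a variable number of tensors (atg_align_tensors,
// atg_atleast_1d_sequence, atg_broadcast_tensors, atg_chunk in the hunks
// around this one) reuse the same out__ buffer: each result is heap-allocated
// into out__[i] and the list is terminated with a nullptr sentinel. The
// deleted `// torch::Tensor **out__ = (torch::Tensor**)malloc(...)` and
// `// return out__;` comments are leftovers of an older scheme in which the
// wrapper allocated and returned the array itself. A hedged caller-side
// sketch, assuming the atg_chunk declaration from this file and a buffer
// sized chunks + 1 (enough room for the sentinel):
//
//   std::vector<tensor> outs(chunks + 1, nullptr);
//   if (atg_chunk(outs.data(), self, chunks, /*dim=*/0) != 0)
//     { /* consult the recorded error message */ }
//   for (int i = 0; outs[i] != nullptr; ++i)
//     { /* use *outs[i]; the caller owns and must delete outs[i] */ }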
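// Two further conventions are visible throughout these hunks: a nullable
// tensor argument arrives as a possibly-null pointer and is forwarded with
// the `(weight ? *weight : torch::Tensor())` idiom, so an undefined tensor
// stands in for an omitted optional argument; and a TensorOptions value is
// flattened into the two ints options_kind / options_device and rebuilt on
// the C++ side as
//
//   at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))
//
// where device_of_int is a helper defined elsewhere in this wrapper.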
PROTECT( auto outputs__ = torch::bitwise_right_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->bitwise_right_shift_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_scalar_tensor(tensor *out__, scalar self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_right_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_right_shift_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_tensor_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_right_shift(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_tensor_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_right_shift_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_right_shift_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_right_shift_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_xor(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_xor_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_xor(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->bitwise_xor_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bitwise_xor_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::blackman_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_blackman_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int 
options_device) { PROTECT( auto outputs__ = torch::blackman_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_block_diag(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::block_diag(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bmm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::bmm(*self, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::bmm_out(*out, *self, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_broadcast_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::broadcast_tensors(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_broadcast_to(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::broadcast_to(*self, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bucketize(tensor *out__, tensor self, tensor boundaries, int out_int32, int right) { PROTECT( auto outputs__ = torch::bucketize(*self, *boundaries, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bucketize_scalar(tensor *out__, scalar self, tensor boundaries, int out_int32, int right) { PROTECT( auto outputs__ = torch::bucketize(*self, *boundaries, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_bucketize_tensor_out(tensor *out__, tensor out, tensor self, tensor boundaries, int out_int32, int right) { PROTECT( auto outputs__ = torch::bucketize_out(*out, *self, *boundaries, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::cartesian_prod(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::cat(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { PROTECT( auto outputs__ = self->cauchy_(median, 
sigma); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode) { PROTECT( auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ceil(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::ceil(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ceil_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::ceil_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ceil_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::ceil_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_celu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::celu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_celu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::celu_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { PROTECT( auto outputs__ = torch::chain_matmul(of_carray_tensor(matrices_data, matrices_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_chain_matmul_out(tensor *out__, tensor out, tensor *matrices_data, int matrices_len) { PROTECT( auto outputs__ = torch::chain_matmul_out(*out, of_carray_tensor(matrices_data, matrices_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_channel_shuffle(tensor *out__, tensor self, int64_t groups) { PROTECT( auto outputs__ = torch::channel_shuffle(*self, groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky(*self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky_inverse(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_inverse(*self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_inverse_out(*out, *self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_out(*out, *self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky_solve(tensor *out__, tensor self, tensor input2, int upper) { PROTECT( auto outputs__ = torch::cholesky_solve(*self, *input2, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2, int upper) { PROTECT( auto outputs__ = torch::cholesky_solve_out(*out, *self, *input2, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 
0; -) -return 1; + return 0; + ) + return 1; } int atg_choose_qparams_optimized(tensor *out__, tensor input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) { @@ -3902,482 +3890,480 @@ int atg_choose_qparams_optimized(tensor *out__, tensor input, int64_t numel, int auto outputs__ = torch::choose_qparams_optimized(*input, numel, n_bins, ratio, bit_width); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { PROTECT( auto outputs__ = torch::chunk(*self, chunks, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp_(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max(tensor *out__, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max(*self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max_(tensor *out__, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max_(*self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max_out(*out, *self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max_tensor(tensor *out__, tensor self, tensor max) { PROTECT( auto outputs__ = torch::clamp_max(*self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max_tensor_(tensor *out__, tensor self, tensor max) { PROTECT( auto outputs__ = torch::clamp_max_(*self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_max_tensor_out(tensor *out__, tensor out, tensor self, tensor max) { PROTECT( auto outputs__ = torch::clamp_max_out(*out, *self, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min(tensor *out__, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min(*self, *min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min_(tensor *out__, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min_(*self, *min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min_out(tensor *out__, tensor out, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min_out(*out, *self, *min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min_tensor(tensor *out__, tensor self, tensor min) { PROTECT( auto outputs__ = torch::clamp_min(*self, 
*min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min_tensor_(tensor *out__, tensor self, tensor min) { PROTECT( auto outputs__ = torch::clamp_min_(*self, *min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_min_tensor_out(tensor *out__, tensor out, tensor self, tensor min) { PROTECT( auto outputs__ = torch::clamp_min_out(*out, *self, *min); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp_out(*out, *self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_tensor(tensor *out__, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clamp(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_tensor_(tensor *out__, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clamp_(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clamp_tensor_out(tensor *out__, tensor out, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clamp_out(*out, *self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clip(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip_(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clip_(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clip_out(*out, *self, *min, *max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip_tensor(tensor *out__, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clip(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip_tensor_(tensor *out__, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clip_(*self, (min ? *min : torch::Tensor()), (max ? *max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clip_tensor_out(tensor *out__, tensor out, tensor self, tensor min, tensor max) { PROTECT( auto outputs__ = torch::clip_out(*out, *self, (min ? *min : torch::Tensor()), (max ? 
*max : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_clone(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::clone(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_coalesce(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->coalesce(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im(*self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_backward(*grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_col2im_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_backward_out(*grad_input, *grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_col_indices(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->col_indices(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_column_stack(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::column_stack(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_column_stack_out(tensor 
*out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::column_stack_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement) { PROTECT( auto outputs__ = torch::combinations(*self, r, (bool)with_replacement); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_complex(tensor *out__, tensor real, tensor imag) { PROTECT( auto outputs__ = torch::complex(*real, *imag); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_complex_out(tensor *out__, tensor out, tensor real, tensor imag) { PROTECT( auto outputs__ = torch::complex_out(*out, *real, *imag); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_concat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::concat(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_concat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::concat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conj(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::conj(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conj_physical(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::conj_physical(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conj_physical_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::conj_physical_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conj_physical_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::conj_physical_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_len) { PROTECT( auto outputs__ = torch::constant_pad_nd(*self, torch::IntArrayRef(pad_data, pad_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_contiguous(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->contiguous(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv1d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv1d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv2d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv3d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv3d_padding(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, char * padding, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv3d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), std::string(padding), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_depthwise3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_depthwise3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_depthwise3d_backward(tensor *out__, tensor grad_input, tensor grad_weight, tensor grad_bias, tensor grad_output, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { @@ -4386,18 +4372,18 @@ int atg_conv_depthwise3d_backward(tensor *out__, tensor grad_input, tensor grad_ out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) { PROTECT( auto outputs__ = torch::conv_tbc(*self, *weight, *bias, pad); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weight, tensor bias, int64_t pad) { @@ -4406,279 +4392,279 @@ int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weigh out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose3d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( auto outputs__ = torch::convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( auto outputs__ = torch::convolution_overrideable(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_blocking) { PROTECT( auto outputs__ = torch::copy_sparse_to_sparse_(*self, *src, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::copysign(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->copysign_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::copysign_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::copysign(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->copysign_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_copysign_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::copysign_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_corrcoef(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::corrcoef(*self); out__[0] = new torch::Tensor(outputs__); - 
return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cos(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cos(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cos_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cos_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cos_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::cos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cosh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cosh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cosh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cosh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cosh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::cosh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { PROTECT( auto outputs__ = torch::cosine_embedding_loss(*input1, *input2, *target, margin, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, double eps) { PROTECT( auto outputs__ = torch::cosine_similarity(*x1, *x2, dim, eps); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cov(tensor *out__, tensor self, int64_t correction, tensor fweights, tensor aweights) { PROTECT( auto outputs__ = torch::cov(*self, correction, (fweights ? *fweights : torch::Tensor()), (aweights ? *aweights : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { PROTECT( auto outputs__ = torch::cross(*self, *other, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cross_entropy_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, double label_smoothing) { PROTECT( auto outputs__ = torch::cross_entropy_loss(*self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index, label_smoothing); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { PROTECT( auto outputs__ = torch::cross_out(*out, *self, *other, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_crow_indices(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->crow_indices(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity) { PROTECT( auto outputs__ = torch::ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, reduction, (bool)zero_infinity); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ctc_loss_tensor(tensor *out__, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity) { PROTECT( auto outputs__ = torch::ctc_loss(*log_probs, *targets, *input_lengths, *target_lengths, blank, reduction, (bool)zero_infinity); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W) { PROTECT( auto outputs__ = torch::cudnn_affine_grid_generator(*theta, n, C, H, W); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W) { PROTECT( auto outputs__ = torch::cudnn_affine_grid_generator_backward(*grad, n, C, H, W); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { @@ -4688,9 +4674,9 @@ int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace) { @@ -4699,126 +4685,126 @@ int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_outpu out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution(*self, *weight, 
torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_add_relu(tensor *out__, tensor self, tensor weight, tensor z, scalar alpha, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::cudnn_convolution_add_relu(*self, *weight, *z, *alpha, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_relu(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::cudnn_convolution_relu(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), 
torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_transpose_deprecated(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_convolution_transpose_deprecated2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { PROTECT( auto outputs__ = torch::cudnn_grid_sampler(*self, *grid); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, tensor grad_output) { @@ -4826,9 +4812,9 @@ int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, ten auto outputs__ = torch::cudnn_grid_sampler_backward(*self, *grid, *grad_output); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cummax(tensor *out__, tensor self, int64_t dim) { @@ -4836,9 +4822,9 @@ int atg_cummax(tensor *out__, tensor self, int64_t dim) { auto outputs__ = torch::cummax(*self, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cummax_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim) { @@ -4846,18 +4832,18 @@ int atg_cummax_out(tensor *out__, tensor values, tensor indices, tensor self, in auto outputs__ = torch::cummax_out(*values, *indices, *self, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cummaxmin_backward(tensor *out__, tensor grad, tensor input, tensor indices, int64_t dim) { PROTECT( auto outputs__ = torch::cummaxmin_backward(*grad, *input, *indices, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cummin(tensor *out__, tensor self, int64_t dim) { @@ -4865,9 +4851,9 @@ 
int atg_cummin(tensor *out__, tensor self, int64_t dim) { auto outputs__ = torch::cummin(*self, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cummin_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim) { @@ -4875,555 +4861,549 @@ int atg_cummin_out(tensor *out__, tensor values, tensor indices, tensor self, in auto outputs__ = torch::cummin_out(*values, *indices, *self, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumprod(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumprod_(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = self->cumprod_(dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumprod_backward(tensor *out__, tensor grad, tensor input, int64_t dim, tensor output) { PROTECT( auto outputs__ = torch::cumprod_backward(*grad, *input, dim, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumprod_out(*out, *self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumsum(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumsum_(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = self->cumsum_(dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumsum_out(*out, *self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumulative_trapezoid(tensor *out__, tensor y, int64_t dim) { PROTECT( auto outputs__ = torch::cumulative_trapezoid(*y, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_cumulative_trapezoid_x(tensor *out__, tensor y, tensor x, int64_t dim) { PROTECT( auto outputs__ = torch::cumulative_trapezoid(*y, *x, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_data(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->data(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_deg2rad(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::deg2rad(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_deg2rad_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::deg2rad_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_deg2rad_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::deg2rad_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dequantize(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::dequantize(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dequantize_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::dequantize(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_det(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::det(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_detach(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::detach(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_detach_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::detach_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diag(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::diag(*self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diag_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t diagonal) { PROTECT( auto outputs__ = torch::diag_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diag_embed(*self, offset, dim1, dim2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::diag_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diagflat(tensor *out__, tensor self, int64_t offset) { PROTECT( auto outputs__ = torch::diagflat(*self, offset); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diagonal(*self, offset, dim1, dim2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diagonal_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diagonal_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), offset, dim1, dim2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diff(tensor *out__, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append) { PROTECT( auto outputs__ = torch::diff(*self, n, dim, (prepend ? 
*prepend : torch::Tensor()), (append ? *append : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_diff_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, tensor prepend, tensor append) { PROTECT( auto outputs__ = torch::diff_out(*out, *self, n, dim, (prepend ? *prepend : torch::Tensor()), (append ? *append : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_digamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::digamma(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_digamma_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->digamma_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_digamma_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::digamma_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dist(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::dist(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::div(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->div_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::div_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_out_mode(tensor *out__, tensor out, tensor self, tensor other, char * rounding_mode) { PROTECT( auto outputs__ = torch::div_out(*out, *self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::div(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->div_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_scalar_mode(tensor *out__, tensor self, scalar other, char * rounding_mode) { PROTECT( auto outputs__ = torch::div(*self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_scalar_mode_(tensor *out__, tensor self, scalar other, char * rounding_mode) { PROTECT( auto outputs__ = self->div_(*other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_tensor_mode(tensor *out__, tensor self, tensor other, char * rounding_mode) { PROTECT( auto outputs__ = torch::div(*self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_div_tensor_mode_(tensor *out__, tensor self, tensor other, char * 
rounding_mode) { PROTECT( auto outputs__ = self->div_(*other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::divide_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_out_mode(tensor *out__, tensor out, tensor self, tensor other, char * rounding_mode) { PROTECT( auto outputs__ = torch::divide_out(*out, *self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_scalar_mode(tensor *out__, tensor self, scalar other, char * rounding_mode) { PROTECT( auto outputs__ = torch::divide(*self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_scalar_mode_(tensor *out__, tensor self, scalar other, char * rounding_mode) { PROTECT( auto outputs__ = self->divide_(*other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_tensor_mode(tensor *out__, tensor self, tensor other, char * rounding_mode) { PROTECT( auto outputs__ = torch::divide(*self, *other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_divide_tensor_mode_(tensor *out__, tensor self, tensor other, char * rounding_mode) { PROTECT( auto outputs__ = self->divide_(*other, std::string(rounding_mode)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dot(tensor *out__, tensor self, tensor tensor) { PROTECT( auto outputs__ = torch::dot(*self, *tensor); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { PROTECT( auto outputs__ = torch::dot_out(*out, *self, *tensor); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; 
} int atg_dsplit(tensor *out__, tensor self, int64_t sections) { PROTECT( auto outputs__ = torch::dsplit(*self, sections); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) { PROTECT( auto outputs__ = torch::dsplit(*self, torch::IntArrayRef(indices_data, indices_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dstack(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::dstack(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_dstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::dstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eig(tensor *out__, tensor self, int eigenvectors) { @@ -5431,9 +5411,9 @@ int atg_eig(tensor *out__, tensor self, int eigenvectors) { auto outputs__ = torch::eig(*self, (bool)eigenvectors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eig_e(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors) { @@ -5441,81 +5421,81 @@ int atg_eig_e(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors) auto outputs__ = torch::eig_out(*e, *v, *self, (bool)eigenvectors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_einsum(tensor *out__, char * equation, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::einsum(std::string(equation), of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_elu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::elu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_elu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::elu_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result) { PROTECT( auto outputs__ = torch::elu_backward(*grad_output, *alpha, *scale, *input_scale, (bool)is_result, *self_or_result); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_elu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, int is_result, tensor self_or_result) { PROTECT( auto outputs__ = torch::elu_backward_out(*grad_input, *grad_output, *alpha, *scale, *input_scale, (bool)is_result, *self_or_result); out__[0] = new 
torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_elu_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::elu_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse) { PROTECT( auto outputs__ = torch::embedding(*weight, *indices, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse) { PROTECT( auto outputs__ = torch::embedding_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset) { @@ -5525,9 +5505,9 @@ int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offse out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_bag_padding_idx(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset, int64_t padding_idx) { @@ -5537,369 +5517,369 @@ int atg_embedding_bag_padding_idx(tensor *out__, tensor weight, tensor indices, out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { PROTECT( auto outputs__ = torch::embedding_dense_backward(*grad_output, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max_norm, double norm_type) { PROTECT( auto outputs__ = torch::embedding_renorm_(*self, *indices, max_norm, norm_type); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { PROTECT( auto outputs__ = torch::embedding_sparse_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_empty_like(tensor *out__, tensor self) { PROTECT( auto outputs__ = 
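/* Ops that return a std::tuple, such as embedding_bag above, are unpacked
   element by element into consecutive out__ slots with std::get<i>. The
   caller must therefore know the arity of each function and size out__
   accordingly. A hedged C-side usage sketch (variable names and the flag
   values are illustrative only):

     tensor results[4];  // embedding_bag yields four tensors
     if (atg_embedding_bag(results, weight, indices, offsets,
                           0, 0, 0, per_sample_weights, 0) != 0) {
       // handle the failure reported by the wrapper
     }
     // results[0] is the output; results[1..3] are the bookkeeping
     // tensors, each a heap-allocated torch::Tensor handle. */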
torch::empty_like(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::empty_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_empty_quantized(tensor *out__, int64_t *size_data, int size_len, tensor qtensor, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty_quantized(torch::IntArrayRef(size_data, size_len), *qtensor, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::eq(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->eq_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::eq_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::eq(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->eq_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eq_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::eq_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erf(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erf_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erf_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erf_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::erf_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfc(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erfc(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfc_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erfc_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfc_out(tensor *out__, tensor out, tensor 
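/* Where ATen overloads a name, the generator disambiguates the C symbols
   with suffixes derived from the YAML overload names, as in the eq family
   just above:

     atg_eq             // eq(Tensor, Scalar)
     atg_eq_tensor      // eq(Tensor, Tensor)
     atg_eq_scalar_out  // eq_out with a Scalar right-hand side
     atg_eq_tensor_out  // eq_out with a Tensor right-hand side

   The trailing-underscore variants (atg_eq_, atg_eq_tensor_) are the
   in-place forms; C has no overloading, so every combination gets its own
   entry point. */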
self) { PROTECT( auto outputs__ = torch::erfc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfinv(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erfinv(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfinv_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->erfinv_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_erfinv_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::erfinv_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp2(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp2(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp2_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp2_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp2_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::exp2_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exp_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::exp_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int implicit) { PROTECT( auto outputs__ = self->expand(torch::IntArrayRef(size_data, size_len), (bool)implicit); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_expand_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->expand_as(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_expm1(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::expm1(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_expm1_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::expm1_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_expm1_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::expm1_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_exponential_(tensor *out__, tensor self, double lambd) { PROTECT( auto outputs__ = self->exponential_(lambd); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::eye(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eye_m(tensor *out__, int64_t 
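/* Factory functions such as empty, empty_strided and eye receive the
   requested dtype and device as two plain ints (options_kind,
   options_device) and rebuild a TensorOptions value at the call site with
   at::device(device_of_int(...)).dtype(at::ScalarType(...)). device_of_int
   comes from the hand-written side of the wrapper; a plausible sketch,
   assuming negative values select the CPU and non-negative values a CUDA
   ordinal:

     torch::Device device_of_int(int d) {
       if (d < 0) return torch::Device(torch::kCPU);
       return torch::Device(torch::kCUDA, d);  // d is the CUDA ordinal
     }
*/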
n, int64_t m, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::eye(n, m, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eye_m_out(tensor *out__, tensor out, int64_t n, int64_t m) { PROTECT( auto outputs__ = torch::eye_out(*out, n, m); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_eye_out(tensor *out__, tensor out, int64_t n) { PROTECT( auto outputs__ = torch::eye_out(*out, n); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_channel_affine_cachemask(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { @@ -5907,27 +5887,27 @@ int atg_fake_quantize_per_channel_affine_cachemask(tensor *out__, tensor self, t auto outputs__ = torch::fake_quantize_per_channel_affine_cachemask(*self, *scale, *zero_point, axis, quant_min, quant_max); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_channel_affine_cachemask_backward(tensor *out__, tensor grad, tensor mask) { PROTECT( auto outputs__ = torch::fake_quantize_per_channel_affine_cachemask_backward(*grad, *mask); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, scale, zero_point, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_tensor_affine_cachemask(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { @@ -5935,792 +5915,792 @@ int atg_fake_quantize_per_tensor_affine_cachemask(tensor *out__, tensor self, do auto outputs__ = torch::fake_quantize_per_tensor_affine_cachemask(*self, scale, zero_point, quant_min, quant_max); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_tensor_affine_cachemask_backward(tensor *out__, tensor grad, tensor mask) { PROTECT( auto outputs__ = torch::fake_quantize_per_tensor_affine_cachemask_backward(*grad, *mask); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fake_quantize_per_tensor_affine_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, *scale, *zero_point, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, 
tensor packed_weight, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_fp16_weight(*input, *packed_weight, *bias); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, tensor packed_weight, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_fp16_weight_fp32_activation(*input, *packed_weight, *bias); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_int8_weight(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_int8_weight_fp32_activation(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) { PROTECT( auto outputs__ = torch::fbgemm_pack_gemm_matrix_fp16(*input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) { PROTECT( auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fbgemm_pack_quantized_matrix_kn(tensor *out__, tensor input, int64_t K, int64_t n) { PROTECT( auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input, K, n); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::feature_alpha_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::feature_alpha_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_feature_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::feature_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::feature_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_fft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto 
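/* Scalar-typed arguments (e.g. weight_scale and weight_zero_point in the
   fbgemm int8 wrappers above) cross the boundary as an opaque scalar handle
   that is dereferenced with * at each call site, so the typedef is
   presumably a pointer to a heap-allocated at::Scalar. A hypothetical
   sketch of the handle and its constructors, mirroring the tensor side:

     typedef at::Scalar *scalar;
     scalar ats_int(int64_t v)   { return new at::Scalar(v); }
     scalar ats_double(double v) { return new at::Scalar(v); }

   Plain numeric parameters such as the dropout probability p are passed by
   value instead and need no boxing. */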
outputs__ = torch::fft_fft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_fft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_fft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fftfreq(tensor *out__, int64_t n, double d, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::fft_fftfreq(n, d, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fftfreq_out(tensor *out__, tensor out, int64_t n, double d) { PROTECT( auto outputs__ = torch::fft_fftfreq_out(*out, n, d); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_fftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_fftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_fftshift(tensor *out__, tensor self, int64_t *dim_data, int dim_len) { PROTECT( auto outputs__ = torch::fft_fftshift(*self, torch::IntArrayRef(dim_data, dim_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_hfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_hfft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_hfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_hfft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_ifft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_ifft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new 
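/* String parameters, like the fft norm argument used throughout this
   block, arrive as a raw char* and are copied before the libtorch call:

     auto outputs__ = torch::fft_fft(*self, n, dim, std::string(norm));

   std::string(norm) would be undefined for a null pointer, so the binding
   layer above this file is assumed to substitute a concrete (possibly
   empty) string; the wrapper itself never checks. */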
torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_ifft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_ifft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_ifftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_ifftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ifftshift(tensor *out__, tensor self, int64_t *dim_data, int dim_len) { PROTECT( auto outputs__ = torch::fft_ifftshift(*self, torch::IntArrayRef(dim_data, dim_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ihfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_ihfft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_ihfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_ihfft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_irfft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_irfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_irfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_irfft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new 
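/* Every functional wrapper is paired with an _out twin that writes into a
   caller-supplied result tensor instead of allocating a fresh one; the
   fft_* pairs above follow the pattern exactly. Note that the _out form
   still boxes a new torch::Tensor handle in out__[0]: the handle is new,
   but it aliases the storage of out. A usage sketch from the C side, with
   illustrative variable names:

     tensor res[1];
     atg_fft_irfft(res, t, n, dim, "backward");           // allocates
     atg_fft_irfft_out(res, buf, t, n, dim, "backward");  // reuses buf
*/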
torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_irfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_irfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_irfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfft(tensor *out__, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_rfft(*self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfft2(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_rfft2(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfft2_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_rfft2_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfft_out(tensor *out__, tensor out, tensor self, int64_t n, int64_t dim, char * norm) { PROTECT( auto outputs__ = torch::fft_rfft_out(*out, *self, n, dim, std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfftfreq(tensor *out__, int64_t n, double d, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::fft_rfftfreq(n, d, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfftfreq_out(tensor *out__, tensor out, int64_t n, double d) { PROTECT( auto outputs__ = torch::fft_rfftfreq_out(*out, n, d); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_rfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fft_rfftn_out(tensor *out__, tensor out, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char * norm) { PROTECT( auto outputs__ = torch::fft_rfftn_out(*out, *self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fill_(tensor *out__, tensor self, scalar value) { PROTECT( auto outputs__ = torch::fill_(*self, *value); 
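/* In-place variants (the trailing-underscore names such as fill_, floor_,
   exp_) mutate self and return it, yet the wrapper still heap-allocates a
   fresh torch::Tensor handle for out__[0]. The new handle shares storage
   with the input, so releasing it later only drops a reference count; it
   does not free the underlying data twice. Sketch, with `one` a previously
   built scalar handle:

     tensor h[1];
     atg_fill_(h, t, one);  // h[0] differs from t as a handle, same storage
*/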
out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) { PROTECT( auto outputs__ = self->fill_diagonal_(*fill_value, (bool)wrap); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fill_tensor_(tensor *out__, tensor self, tensor value) { PROTECT( auto outputs__ = torch::fill_(*self, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fix(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::fix(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fix_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::fix_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fix_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::fix_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) { PROTECT( auto outputs__ = torch::flatten(*self, start_dim, end_dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_flatten_dense_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::flatten_dense_tensors(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::flip(*self, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fliplr(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::fliplr(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_flipud(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::flipud(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power(tensor *out__, tensor self, tensor exponent) { PROTECT( auto outputs__ = torch::float_power(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_(tensor *out__, tensor self, scalar exponent) { PROTECT( auto outputs__ = self->float_power_(*exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_scalar(tensor *out__, scalar self, tensor exponent) { PROTECT( auto outputs__ = torch::float_power(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_scalar_out(tensor *out__, tensor out, scalar self, tensor exponent) { PROTECT( auto outputs__ = torch::float_power_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_tensor_(tensor *out__, tensor self, tensor exponent) { PROTECT( auto outputs__ = self->float_power_(*exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_tensor_scalar(tensor *out__, tensor 
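/* Tensor-list parameters (flatten_dense_tensors above, dstack earlier)
   arrive as a pointer-plus-length pair and are rebuilt into the
   std::vector<torch::Tensor> that libtorch expects by of_carray_tensor.
   A sketch of the assumed helper, defined in the hand-written wrapper code:

     std::vector<torch::Tensor> of_carray_tensor(torch::Tensor **vs,
                                                 int len) {
       std::vector<torch::Tensor> result;
       result.reserve(len);
       for (int i = 0; i < len; ++i) result.push_back(*(vs[i]));
       return result;
     }
*/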
self, scalar exponent) { PROTECT( auto outputs__ = torch::float_power(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar exponent) { PROTECT( auto outputs__ = torch::float_power_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_float_power_tensor_tensor_out(tensor *out__, tensor out, tensor self, tensor exponent) { PROTECT( auto outputs__ = torch::float_power_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::floor(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::floor_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_divide(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::floor_divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_divide_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->floor_divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_divide_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::floor_divide_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_divide_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::floor_divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_divide_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->floor_divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_floor_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::floor_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmax(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmax(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmax_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmax_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmin(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmin(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmin_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmin_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmod(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::fmod(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } 
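/* Caller-side view of the error-code protocol, sketched in C. Only the 0/1
   return contract is visible in this file; the name and shape of the
   error-retrieval hook are assumptions:

     tensor out[1];
     if (atg_fmax(out, a, b) != 0) {
       // the PROTECT block swallowed a C++ exception; consult the
       // wrapper's last-error facility, whatever form it takes
       return -1;
     }
     // on success out[0] owns a new torch::Tensor handle
*/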
int atg_fmod_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->fmod_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmod_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::fmod_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmod_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmod(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmod_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->fmod_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fmod_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmod_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frac(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frac(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frac_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frac_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frac_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::frac_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { @@ -6728,27 +6708,27 @@ int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_d auto outputs__ = torch::fractional_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool2d_output(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int 
output_size_len, tensor random_samples) { @@ -6756,9 +6736,9 @@ int atg_fractional_max_pool2d_output(tensor *out__, tensor output, tensor indice auto outputs__ = torch::fractional_max_pool2d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { @@ -6766,27 +6746,27 @@ int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_d auto outputs__ = torch::fractional_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fractional_max_pool3d_output(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { @@ -6794,9 +6774,9 @@ int atg_fractional_max_pool3d_output(tensor *out__, tensor output, tensor indice auto outputs__ = torch::fractional_max_pool3d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frexp(tensor *out__, tensor self) { @@ -6804,9 +6784,9 @@ int atg_frexp(tensor *out__, tensor self) { auto outputs__ = torch::frexp(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frexp_tensor_out(tensor *out__, tensor mantissa, tensor exponent, tensor self) { @@ -6814,234 +6794,234 @@ int atg_frexp_tensor_out(tensor *out__, tensor mantissa, tensor exponent, tensor auto outputs__ = torch::frexp_out(*mantissa, *exponent, *self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new 
torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frobenius_norm(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frobenius_norm(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frobenius_norm_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::frobenius_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::frobenius_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_from_file(tensor *out__, char * filename, int shared, int64_t size, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::from_file(std::string(filename), (bool)shared, size, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_full_like(tensor *out__, tensor self, scalar fill_value) { PROTECT( auto outputs__ = torch::full_like(*self, *fill_value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, scalar fill_value) { PROTECT( auto outputs__ = torch::full_out(*out, torch::IntArrayRef(size_data, size_len), *fill_value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_fused_moving_avg_obs_fake_quant(tensor *out__, tensor self, tensor observer_on, tensor fake_quant_on, tensor running_min, tensor running_max, tensor scale, tensor zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, int per_row_fake_quant, int symmetric_quant) { PROTECT( auto outputs__ = torch::fused_moving_avg_obs_fake_quant(*self, *observer_on, *fake_quant_on, *running_min, *running_max, *scale, *zero_point, averaging_const, quant_min, quant_max, ch_axis, (bool)per_row_fake_quant, (bool)symmetric_quant); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse_grad) { PROTECT( auto outputs__ = torch::gather(*self, dim, *index, (bool)sparse_grad); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gather_backward(tensor *out__, tensor grad, tensor self, int64_t dim, tensor index, int sparse_grad) { PROTECT( auto outputs__ = torch::gather_backward(*grad, *self, dim, *index, (bool)sparse_grad); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int 
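/* This ABI has no bool, so flags such as sparse_grad travel as int and are
   narrowed with an explicit cast at each call site, e.g.
   torch::gather(*self, dim, *index, (bool)sparse_grad) above. Any non-zero
   int therefore reads as true. */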
sparse_grad) { PROTECT( auto outputs__ = torch::gather_out(*out, *self, dim, *index, (bool)sparse_grad); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gcd(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::gcd(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gcd_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::gcd_(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gcd_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::gcd_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ge(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->ge_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ge_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ge(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->ge_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ge_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ge_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gelu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::gelu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gelu_backward(tensor *out__, tensor grad, tensor self) { PROTECT( auto outputs__ = torch::gelu_backward(*grad, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gelu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad, tensor self) { PROTECT( auto outputs__ = torch::gelu_backward_out(*grad_input, *grad, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gelu_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::gelu_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_geometric_(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = self->geometric_(p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_geqrf(tensor *out__, tensor self) { @@ -7049,9 +7029,9 @@ int atg_geqrf(tensor *out__, tensor self) { auto outputs__ = torch::geqrf(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_geqrf_a(tensor *out__, tensor a, tensor tau, tensor self) { @@ -7059,198 +7039,198 @@ int atg_geqrf_a(tensor *out__, tensor a, tensor tau, tensor self) { auto outputs__ = torch::geqrf_out(*a, *tau, *self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ger(tensor *out__, tensor self, tensor vec2) { PROTECT( auto outputs__ = torch::ger(*self, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) { PROTECT( auto outputs__ = torch::ger_out(*out, *self, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_glu(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::glu(*self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::glu_backward(*grad_output, *self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_glu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::glu_backward_out(*grad_input, *grad_output, *self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::glu_out(*out, *self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grad(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->grad(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::greater(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->greater_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_equal(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::greater_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_equal_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->greater_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::greater_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_equal_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::greater_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_equal_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->greater_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 
1; } int atg_greater_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::greater_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::greater_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::greater(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->greater_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_greater_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::greater_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( auto outputs__ = torch::grid_sampler(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( auto outputs__ = torch::grid_sampler_2d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { @@ -7258,18 +7238,18 @@ int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input auto outputs__ = torch::grid_sampler_2d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( auto outputs__ = torch::grid_sampler_3d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { @@ -7277,18 +7257,18 @@ int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input auto outputs__ = torch::grid_sampler_3d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled) { PROTECT( auto outputs__ = 
torch::group_norm(*input, num_groups, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enabled); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) { @@ -7296,18 +7276,18 @@ int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int par auto outputs__ = torch::gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) { PROTECT( auto outputs__ = torch::gru_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gru_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) { @@ -7315,1009 +7295,1005 @@ int atg_gru_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tens auto outputs__ = torch::gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::gt(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->gt_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::gt_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::gt(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->gt_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_gt_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::gt_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hamming_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hamming_window_periodic(tensor *out__, int64_t 
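/* Optional tensor arguments are encoded as possibly-null handles and
   expanded to an undefined torch::Tensor() when absent, as in group_norm
   and gru_cell above:

     (weight ? *weight : torch::Tensor())

   Passing a default-constructed Tensor is how ATen spells "no tensor
   supplied", so a C caller opts out of weight or bias simply by passing a
   null pointer. */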
window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hamming_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hamming_window_periodic_alpha(tensor *out__, int64_t window_length, int periodic, double alpha, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hamming_window_periodic_alpha_beta(tensor *out__, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hann_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hann_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::hann_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardshrink(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardshrink(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar lambd) { PROTECT( auto outputs__ = torch::hardshrink_backward(*grad_out, *self, *lambd); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardshrink_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_out, tensor self, scalar lambd) { PROTECT( auto outputs__ = torch::hardshrink_backward_out(*grad_input, *grad_out, *self, *lambd); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardshrink_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::hardshrink_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardsigmoid(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardsigmoid(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardsigmoid_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardsigmoid_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardsigmoid_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::hardsigmoid_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_hardsigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::hardsigmoid_backward_out(*grad_input, *grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardsigmoid_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::hardsigmoid_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardswish(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardswish(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardswish_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardswish_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardswish_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::hardswish_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardswish_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::hardswish_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardtanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardtanh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardtanh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardtanh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar min_val, scalar max_val) { PROTECT( auto outputs__ = torch::hardtanh_backward(*grad_output, *self, *min_val, *max_val); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardtanh_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val) { PROTECT( auto outputs__ = torch::hardtanh_backward_out(*grad_input, *grad_output, *self, *min_val, *max_val); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hardtanh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::hardtanh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_heaviside(tensor *out__, tensor self, tensor values) { PROTECT( auto outputs__ = torch::heaviside(*self, *values); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_heaviside_(tensor *out__, tensor self, tensor values) { PROTECT( auto outputs__ = self->heaviside_(*values); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_heaviside_out(tensor *out__, tensor out, tensor self, tensor values) { PROTECT( auto outputs__ = torch::heaviside_out(*out, *self, *values); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) { PROTECT( auto outputs__ = torch::hinge_embedding_loss(*self, *target, margin, reduction); out__[0] = new 
torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_histc(tensor *out__, tensor self, int64_t bins) { PROTECT( auto outputs__ = torch::histc(*self, bins); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) { PROTECT( auto outputs__ = torch::histc_out(*out, *self, bins); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hsplit(tensor *out__, tensor self, int64_t sections) { PROTECT( auto outputs__ = torch::hsplit(*self, sections); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) { PROTECT( auto outputs__ = torch::hsplit(*self, torch::IntArrayRef(indices_data, indices_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::hspmm(*mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::hspmm_out(*out, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hstack(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::hstack(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::hstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_huber_loss(tensor *out__, tensor self, tensor target, int64_t reduction, double delta) { PROTECT( auto outputs__ = torch::huber_loss(*self, *target, reduction, delta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_huber_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta) { PROTECT( auto outputs__ = torch::huber_loss_backward(*grad_output, *self, *target, reduction, delta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_huber_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double delta) { PROTECT( auto outputs__ = torch::huber_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, delta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_huber_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction, double delta) { PROTECT( auto outputs__ = torch::huber_loss_out(*out, *self, *target, reduction, delta); out__[0] = 
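// atg_hsplit and atg_hsplit_array above are among the wrappers with a
// variable number of result tensors: they fill out__[0..sz-1] and
// terminate the array with nullptr rather than returning a count (the
// commented-out malloc/return-pointer lines deleted by this patch were an
// older scheme that allocated the array inside the wrapper). The caller is
// therefore expected to supply a large enough buffer and scan to the
// terminator; a usage sketch under that assumption, with MAX_PARTS as a
// hypothetical caller-chosen bound:
//
//   tensor parts[MAX_PARTS + 1];
//   if (atg_hsplit(parts, self, sections) == 0) {
//     for (int i = 0; parts[i] != nullptr; ++i) {
//       // use parts[i]; each slot owns a heap-allocated torch::Tensor
//     }
//   }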
new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hypot(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::hypot(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hypot_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->hypot_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_hypot_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::hypot_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_i0(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::i0(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_i0_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::i0_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_i0_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::i0_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igamma(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::igamma(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igamma_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->igamma_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igamma_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::igamma_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igammac(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::igammac(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igammac_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->igammac_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_igammac_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::igammac_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::im2col(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::im2col_backward(*grad_output, 
torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_im2col_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::im2col_backward_out(*grad_input, *grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::im2col_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_imag(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::imag(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len) { PROTECT( auto outputs__ = torch::index(*self, of_carray_tensor_opt(indices_data, indices_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( auto outputs__ = torch::index_add(*self, dim, *index, *source); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( auto outputs__ = self->index_add_(dim, *index, *source); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_add_alpha(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) { PROTECT( auto outputs__ = torch::index_add(*self, dim, *index, *source, *alpha); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_add_alpha_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source, scalar alpha) { PROTECT( auto outputs__ = self->index_add_(dim, *index, *source, *alpha); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( auto outputs__ = torch::index_copy(*self, dim, *index, *source); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor 
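// atg_index and atg_index_put above route their index arrays through
// of_carray_tensor_opt, which converts a C array of possibly-null tensor
// pointers into the list-of-optional-tensors type the C++ indexing API
// expects (a null slot meaning "no index along this dimension"). The
// helper lives in the hand-written wrapper; a sketch of the assumed shape,
// not the actual code:
//
//   c10::List<c10::optional<torch::Tensor>>
//   of_carray_tensor_opt(tensor *vs, int len) {
//     c10::List<c10::optional<torch::Tensor>> result;
//     for (int i = 0; i < len; ++i)
//       result.push_back(vs[i] ? c10::optional<torch::Tensor>(*vs[i])
//                              : c10::nullopt);
//     return result;
//   }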
source) { PROTECT( auto outputs__ = self->index_copy_(dim, *index, *source); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) { PROTECT( auto outputs__ = torch::index_fill(*self, dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) { PROTECT( auto outputs__ = self->index_fill_(dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_fill_int_tensor(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) { PROTECT( auto outputs__ = torch::index_fill(*self, dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_fill_int_tensor_(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) { PROTECT( auto outputs__ = self->index_fill_(dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) { PROTECT( auto outputs__ = torch::index_put(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) { PROTECT( auto outputs__ = torch::index_put_(*self, of_carray_tensor_opt(indices_data, indices_len), *values, (bool)accumulate); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) { PROTECT( auto outputs__ = torch::index_select(*self, dim, *index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_select_backward(tensor *out__, tensor grad, int64_t *self_sizes_data, int self_sizes_len, int64_t dim, tensor index) { PROTECT( auto outputs__ = torch::index_select_backward(*grad, torch::IntArrayRef(self_sizes_data, self_sizes_len), dim, *index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) { PROTECT( auto outputs__ = torch::index_select_out(*out, *self, dim, *index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_indices(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->indices(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_infinitely_differentiable_gelu_backward(tensor *out__, tensor grad, tensor self) { PROTECT( auto outputs__ = torch::infinitely_differentiable_gelu_backward(*grad, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_inner(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::inner(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_inner_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = 
torch::inner_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) { PROTECT( auto outputs__ = torch::instance_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)use_input_stats, momentum, eps, (bool)cudnn_enabled); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_int_repr(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::int_repr(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_inverse(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::inverse(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_inverse_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::inverse_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double atol, int equal_nan) { PROTECT( auto outputs__ = torch::isclose(*self, *other, rtol, atol, (bool)equal_nan); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isfinite(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isfinite(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin(tensor *out__, tensor elements, tensor test_elements, int assume_unique, int invert) { PROTECT( auto outputs__ = torch::isin(*elements, *test_elements, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin_scalar_tensor(tensor *out__, scalar element, tensor test_elements, int assume_unique, int invert) { PROTECT( auto outputs__ = torch::isin(*element, *test_elements, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin_scalar_tensor_out(tensor *out__, tensor out, scalar element, tensor test_elements, int assume_unique, int invert) { PROTECT( auto outputs__ = torch::isin_out(*out, *element, *test_elements, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin_tensor_scalar(tensor *out__, tensor elements, scalar test_element, int assume_unique, int invert) { PROTECT( auto outputs__ = torch::isin(*elements, *test_element, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin_tensor_scalar_out(tensor *out__, tensor out, tensor elements, scalar test_element, int assume_unique, int invert) { PROTECT( auto outputs__ = torch::isin_out(*out, *elements, *test_element, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isin_tensor_tensor_out(tensor *out__, tensor out, tensor elements, tensor test_elements, int assume_unique, int invert) { PROTECT( auto outputs__ = 
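// Nullable tensor arguments (weight, bias and the running statistics in
// atg_instance_norm above, bias in atg_linear further down) are modelled
// as plain pointers: a null pointer becomes a default-constructed,
// undefined torch::Tensor via the "(weight ? *weight : torch::Tensor())"
// ternary, which the C++ API reads as "argument omitted". Assumed caller
// side:
//
//   atg_linear(out, input, weight, nullptr);  // nullptr omits the bias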
torch::isin_out(*out, *elements, *test_elements, (bool)assume_unique, (bool)invert); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isinf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isinf(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isnan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isnan(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isneginf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isneginf(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isneginf_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::isneginf_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isposinf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isposinf(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isposinf_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::isposinf_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_isreal(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::isreal(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_istft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int center, int normalized, int onesided, int64_t length, int return_complex) { PROTECT( auto outputs__ = torch::istft(*self, n_fft, hop_length, win_length, (window ? 
*window : torch::Tensor()), (bool)center, (bool)normalized, (bool)onesided, length, (bool)return_complex); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kaiser_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::kaiser_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kaiser_window_beta(tensor *out__, int64_t window_length, int periodic, double beta, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kaiser_window_periodic(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction, int log_target) { PROTECT( auto outputs__ = torch::kl_div(*self, *target, reduction, (bool)log_target); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, int log_target) { PROTECT( auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction, (bool)log_target); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kron(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::kron(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kron_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::kron_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim) { @@ -8325,9 +8301,9 @@ int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim auto outputs__ = torch::kthvalue(*self, k, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_kthvalue_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim) { @@ -8335,405 +8311,405 @@ int atg_kthvalue_values(tensor *out__, tensor values, tensor indices, tensor sel auto outputs__ = torch::kthvalue_out(*values, *indices, *self, k, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::l1_loss(*self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_l1_loss_backward(tensor *out__, tensor 
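// Operators with several results, such as atg_kthvalue above, unpack the
// std::tuple returned by the C++ call into consecutive slots of out__, so
// the caller must pass a buffer with one slot per output (two here: values
// and indices). Sketch of the assumed call pattern:
//
//   tensor outs[2];
//   if (atg_kthvalue(outs, self, k, dim, 1 /* keepdim */) == 0) {
//     tensor values  = outs[0];
//     tensor indices = outs[1];
//   }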
grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::l1_loss_backward(*grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_l1_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::l1_loss_out(*out, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable) { PROTECT( auto outputs__ = torch::layer_norm(*input, torch::IntArrayRef(normalized_shape_data, normalized_shape_len), (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enable); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lcm(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::lcm(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lcm_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::lcm_(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lcm_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::lcm_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ldexp(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ldexp(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ldexp_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ldexp_(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ldexp_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ldexp_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::le(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->le_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::le_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::le(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le_tensor_(tensor *out__, tensor self, tensor 
other) { PROTECT( auto outputs__ = self->le_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_le_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::le_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_leaky_relu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::leaky_relu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_leaky_relu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::leaky_relu_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scalar negative_slope, int self_is_result) { PROTECT( auto outputs__ = torch::leaky_relu_backward(*grad_output, *self, *negative_slope, (bool)self_is_result); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_leaky_relu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope, int self_is_result) { PROTECT( auto outputs__ = torch::leaky_relu_backward_out(*grad_input, *grad_output, *self, *negative_slope, (bool)self_is_result); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::leaky_relu_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) { PROTECT( auto outputs__ = torch::lerp(*self, *end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) { PROTECT( auto outputs__ = self->lerp_(*end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp_scalar_out(tensor *out__, tensor out, tensor self, tensor end, scalar weight) { PROTECT( auto outputs__ = torch::lerp_out(*out, *self, *end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp_tensor(tensor *out__, tensor self, tensor end, tensor weight) { PROTECT( auto outputs__ = torch::lerp(*self, *end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp_tensor_(tensor *out__, tensor self, tensor end, tensor weight) { PROTECT( auto outputs__ = self->lerp_(*end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lerp_tensor_out(tensor *out__, tensor out, tensor self, tensor end, tensor weight) { PROTECT( auto outputs__ = torch::lerp_out(*out, *self, *end, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::less(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->less_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + 
return 1; } int atg_less_equal(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::less_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_equal_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->less_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::less_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_equal_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::less_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_equal_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->less_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::less_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::less_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::less(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->less_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_less_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::less_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lgamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::lgamma(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lgamma_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->lgamma_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_lgamma_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::lgamma_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cholesky(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::linalg_cholesky(*self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cholesky_ex(tensor *out__, tensor self, int upper, int check_errors) { @@ -8741,9 +8717,9 @@ int atg_linalg_cholesky_ex(tensor *out__, tensor self, int upper, int check_erro auto outputs__ = torch::linalg_cholesky_ex(*self, (bool)upper, (bool)check_errors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + 
return 1; } int atg_linalg_cholesky_ex_l(tensor *out__, tensor L, tensor info, tensor self, int upper, int check_errors) { @@ -8751,72 +8727,72 @@ int atg_linalg_cholesky_ex_l(tensor *out__, tensor L, tensor info, tensor self, auto outputs__ = torch::linalg_cholesky_ex_out(*L, *info, *self, (bool)upper, (bool)check_errors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { PROTECT( auto outputs__ = torch::linalg_cholesky_out(*out, *self, (bool)upper); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cond(tensor *out__, tensor self, scalar p) { PROTECT( auto outputs__ = torch::linalg_cond(*self, *p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cond_out(tensor *out__, tensor out, tensor self, scalar p) { PROTECT( auto outputs__ = torch::linalg_cond_out(*out, *self, *p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cond_p_str(tensor *out__, tensor self, char * p) { PROTECT( auto outputs__ = torch::linalg_cond(*self, std::string(p)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_cond_p_str_out(tensor *out__, tensor out, tensor self, char * p) { PROTECT( auto outputs__ = torch::linalg_cond_out(*out, *self, std::string(p)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_det(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::linalg_det(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_det_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::linalg_det_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eig(tensor *out__, tensor self) { @@ -8824,9 +8800,9 @@ int atg_linalg_eig(tensor *out__, tensor self) { auto outputs__ = torch::linalg_eig(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eig_out(tensor *out__, tensor eigenvalues, tensor eigenvectors, tensor self) { @@ -8834,9 +8810,9 @@ int atg_linalg_eig_out(tensor *out__, tensor eigenvalues, tensor eigenvectors, t auto outputs__ = torch::linalg_eig_out(*eigenvalues, *eigenvectors, *self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigh(tensor *out__, tensor self, char * UPLO) { @@ -8844,9 +8820,9 @@ int atg_linalg_eigh(tensor *out__, tensor self, char * UPLO) { auto outputs__ = torch::linalg_eigh(*self, std::string(UPLO)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigh_eigvals(tensor *out__, tensor eigvals, tensor eigvecs, tensor self, char * UPLO) { @@ -8854,72 +8830,72 @@ int atg_linalg_eigh_eigvals(tensor *out__, tensor eigvals, tensor eigvecs, tenso auto outputs__ = torch::linalg_eigh_out(*eigvals, *eigvecs, *self, std::string(UPLO)); 
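// The linalg wrappers take free-form options (UPLO for eigh/eigvalsh, p
// for cond, and mode/driver for the qr and lstsq wrappers further down) as
// NUL-terminated char* and wrap them in std::string at the call site. A
// hedged usage sketch, assuming string literals pass through unchanged:
//
//   tensor outs[2];
//   int err = atg_linalg_eigh(outs, self, (char *)"L");  // lower triangle
//   // err == 0: outs[0] holds eigenvalues, outs[1] eigenvectors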
out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigvals(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::linalg_eigvals(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigvals_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::linalg_eigvals_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigvalsh(tensor *out__, tensor self, char * UPLO) { PROTECT( auto outputs__ = torch::linalg_eigvalsh(*self, std::string(UPLO)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_eigvalsh_out(tensor *out__, tensor out, tensor self, char * UPLO) { PROTECT( auto outputs__ = torch::linalg_eigvalsh_out(*out, *self, std::string(UPLO)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_householder_product(tensor *out__, tensor input, tensor tau) { PROTECT( auto outputs__ = torch::linalg_householder_product(*input, *tau); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_householder_product_out(tensor *out__, tensor out, tensor input, tensor tau) { PROTECT( auto outputs__ = torch::linalg_householder_product_out(*out, *input, *tau); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_inv(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::linalg_inv(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_inv_ex(tensor *out__, tensor self, int check_errors) { @@ -8927,9 +8903,9 @@ int atg_linalg_inv_ex(tensor *out__, tensor self, int check_errors) { auto outputs__ = torch::linalg_inv_ex(*self, (bool)check_errors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_inv_ex_inverse(tensor *out__, tensor inverse, tensor info, tensor self, int check_errors) { @@ -8937,18 +8913,18 @@ int atg_linalg_inv_ex_inverse(tensor *out__, tensor inverse, tensor info, tensor auto outputs__ = torch::linalg_inv_ex_out(*inverse, *info, *self, (bool)check_errors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_inv_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::linalg_inv_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_lstsq(tensor *out__, tensor self, tensor b, double rcond, char * driver) { @@ -8958,9 +8934,9 @@ int atg_linalg_lstsq(tensor *out__, tensor self, tensor b, double rcond, char * out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_lstsq_out(tensor *out__, tensor solution, tensor residuals, tensor rank, tensor singular_values, tensor self, tensor b, double rcond, char * driver) { @@ -8970,135 +8946,135 @@ int atg_linalg_lstsq_out(tensor *out__, 
tensor solution, tensor residuals, tenso out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); out__[3] = new torch::Tensor(std::get<3>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matmul(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::linalg_matmul(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::linalg_matmul_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_power(tensor *out__, tensor self, int64_t n) { PROTECT( auto outputs__ = torch::linalg_matrix_power(*self, n); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_power_out(tensor *out__, tensor out, tensor self, int64_t n) { PROTECT( auto outputs__ = torch::linalg_matrix_power_out(*out, *self, n); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_rank(tensor *out__, tensor self, double tol, int hermitian) { PROTECT( auto outputs__ = torch::linalg_matrix_rank(*self, tol, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_rank_out(tensor *out__, tensor out, tensor self, double tol, int hermitian) { PROTECT( auto outputs__ = torch::linalg_matrix_rank_out(*out, *self, tol, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_rank_out_tol_tensor(tensor *out__, tensor out, tensor input, tensor tol, int hermitian) { PROTECT( auto outputs__ = torch::linalg_matrix_rank_out(*out, *input, *tol, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_matrix_rank_tol_tensor(tensor *out__, tensor input, tensor tol, int hermitian) { PROTECT( auto outputs__ = torch::linalg_matrix_rank(*input, *tol, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_multi_dot(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::linalg_multi_dot(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_multi_dot_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::linalg_multi_dot_out(*out, of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_pinv(tensor *out__, tensor self, double rcond, int hermitian) { PROTECT( auto outputs__ = torch::linalg_pinv(*self, rcond, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_pinv_out(tensor *out__, tensor out, tensor self, double rcond, int hermitian) { PROTECT( auto outputs__ = torch::linalg_pinv_out(*out, *self, rcond, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_pinv_out_rcond_tensor(tensor *out__, tensor out, tensor self, tensor rcond, 
int hermitian) { PROTECT( auto outputs__ = torch::linalg_pinv_out(*out, *self, *rcond, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_pinv_rcond_tensor(tensor *out__, tensor self, tensor rcond, int hermitian) { PROTECT( auto outputs__ = torch::linalg_pinv(*self, *rcond, (bool)hermitian); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_qr(tensor *out__, tensor self, char * mode) { @@ -9106,9 +9082,9 @@ int atg_linalg_qr(tensor *out__, tensor self, char * mode) { auto outputs__ = torch::linalg_qr(*self, std::string(mode)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, char * mode) { @@ -9116,9 +9092,9 @@ int atg_linalg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, char * mod auto outputs__ = torch::linalg_qr_out(*Q, *R, *self, std::string(mode)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_slogdet(tensor *out__, tensor self) { @@ -9126,9 +9102,9 @@ int atg_linalg_slogdet(tensor *out__, tensor self) { auto outputs__ = torch::linalg_slogdet(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_slogdet_out(tensor *out__, tensor sign, tensor logabsdet, tensor self) { @@ -9136,27 +9112,27 @@ int atg_linalg_slogdet_out(tensor *out__, tensor sign, tensor logabsdet, tensor auto outputs__ = torch::linalg_slogdet_out(*sign, *logabsdet, *self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_solve(tensor *out__, tensor input, tensor other) { PROTECT( auto outputs__ = torch::linalg_solve(*input, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_solve_out(tensor *out__, tensor out, tensor input, tensor other) { PROTECT( auto outputs__ = torch::linalg_solve_out(*out, *input, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_svd(tensor *out__, tensor self, int full_matrices) { @@ -9165,9 +9141,9 @@ int atg_linalg_svd(tensor *out__, tensor self, int full_matrices) { out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor self, int full_matrices) { @@ -9176,513 +9152,513 @@ int atg_linalg_svd_u(tensor *out__, tensor U, tensor S, tensor Vh, tensor self, out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_svdvals(tensor *out__, tensor input) { PROTECT( auto outputs__ = torch::linalg_svdvals(*input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_svdvals_out(tensor 
*out__, tensor out, tensor input) { PROTECT( auto outputs__ = torch::linalg_svdvals_out(*out, *input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_tensorinv(tensor *out__, tensor self, int64_t ind) { PROTECT( auto outputs__ = torch::linalg_tensorinv(*self, ind); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_tensorinv_out(tensor *out__, tensor out, tensor self, int64_t ind) { PROTECT( auto outputs__ = torch::linalg_tensorinv_out(*out, *self, ind); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_tensorsolve(tensor *out__, tensor self, tensor other, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::linalg_tensorsolve(*self, *other, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linalg_tensorsolve_out(tensor *out__, tensor out, tensor self, tensor other, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::linalg_tensorsolve_out(*out, *self, *other, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::linear(*input, *weight, (bias ? *bias : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linear_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::linear_out(*out, *input, *weight, (bias ? *bias : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) { PROTECT( auto outputs__ = torch::linspace_out(*out, *start, *end, steps); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::log(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log10(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::log10(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log10_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::log10_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log10_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::log10_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log1p(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::log1p(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_log1p_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::log1p_(*self); 
int atg_log1p_(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::log1p_(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log1p_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::log1p_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log2(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::log2(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log2_(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::log2_(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log2_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::log2_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::log_(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
  PROTECT(
    auto outputs__ = self->log_normal_(mean, std);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::log_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_sigmoid(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::log_sigmoid(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, tensor buffer) {
  PROTECT(
    auto outputs__ = torch::log_sigmoid_backward(*grad_output, *self, *buffer);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_sigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor buffer) {
  PROTECT(
    auto outputs__ = torch::log_sigmoid_backward_out(*grad_input, *grad_output, *self, *buffer);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::log_sigmoid_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
  PROTECT(
    auto outputs__ = torch::log_softmax(*self, dim, torch::ScalarType(dtype));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logaddexp(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logaddexp(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logaddexp2(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logaddexp2(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logaddexp2_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logaddexp2_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_logaddexp_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logaddexp_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logcumsumexp(tensor *out__, tensor self, int64_t dim) {
  PROTECT(
    auto outputs__ = torch::logcumsumexp(*self, dim);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) {
  PROTECT(
    auto outputs__ = torch::logcumsumexp_out(*out, *self, dim);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logdet(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::logdet(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_and(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_and(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_and_(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = self->logical_and_(*other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_and_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_and_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_not(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::logical_not(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_not_(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = self->logical_not_();
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_not_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::logical_not_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_or(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_or(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_or_(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = self->logical_or_(*other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_or_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_or_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_xor(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_xor(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_xor_(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = self->logical_xor_(*other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::logical_xor_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_logit(tensor *out__, tensor self, double eps) {
  PROTECT(
    auto outputs__ = torch::logit(*self, eps);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logit_(tensor *out__, tensor self, double eps) {
  PROTECT(
    auto outputs__ = torch::logit_(*self, eps);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logit_backward(tensor *out__, tensor grad_output, tensor self, double eps) {
  PROTECT(
    auto outputs__ = torch::logit_backward(*grad_output, *self, eps);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logit_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, double eps) {
  PROTECT(
    auto outputs__ = torch::logit_backward_out(*grad_input, *grad_output, *self, eps);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logit_out(tensor *out__, tensor out, tensor self, double eps) {
  PROTECT(
    auto outputs__ = torch::logit_out(*out, *self, eps);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) {
  PROTECT(
    auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) {
  PROTECT(
    auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
  PROTECT(
    auto outputs__ = torch::logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
  PROTECT(
    auto outputs__ = torch::logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
@@ -9691,9 +9667,9 @@ int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *p
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
@@ -9701,9 +9677,9 @@ int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tens
    auto outputs__ = torch::lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lstm_data(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
@@ -9712,9 +9688,9 @@ int atg_lstm_data(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_dat
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lstsq(tensor *out__, tensor self, tensor A) {
@@ -9722,9 +9698,9 @@ int atg_lstsq(tensor *out__, tensor self, tensor A) {
    auto outputs__ = torch::lstsq(*self, *A);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lstsq_x(tensor *out__, tensor X, tensor qr, tensor self, tensor A) {
@@ -9732,81 +9708,81 @@ int atg_lstsq_x(tensor *out__, tensor X, tensor qr, tensor self, tensor A) {
    auto outputs__ = torch::lstsq_out(*X, *qr, *self, *A);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt(tensor *out__, tensor self, scalar other) {
  PROTECT(
    auto outputs__ = torch::lt(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt_(tensor *out__, tensor self, scalar other) {
  PROTECT(
    auto outputs__ = self->lt_(*other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
  PROTECT(
    auto outputs__ = torch::lt_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt_tensor(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::lt(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt_tensor_(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = self->lt_(*other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lt_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::lt_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) {
  PROTECT(
    auto outputs__ = torch::lu_solve(*self, *LU_data, *LU_pivots);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, tensor LU_pivots) {
  PROTECT(
    auto outputs__ = torch::lu_solve_out(*out, *self, *LU_data, *LU_pivots);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
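(Ops that return several tensors, such as lstsq and lu_unpack around here, come back from ATen as a std::tuple, and the generator unpacks each element into the caller-provided out__ array in positional order. A reduced sketch of that unpacking, with a stand-in Tensor type so the snippet compiles without libtorch; fake_lstsq is illustrative only:)

#include <tuple>

struct Tensor { double v; };   // stand-in for torch::Tensor
typedef Tensor *tensor;        // handle type used by the C API

// Stand-in for a tuple-returning op such as torch::lstsq.
static std::tuple<Tensor, Tensor> fake_lstsq(const Tensor &a, const Tensor &b) {
  return std::make_tuple(Tensor{a.v + b.v}, Tensor{a.v - b.v});
}

int atg_fake_lstsq(tensor *out__, tensor self, tensor A) {
  auto outputs__ = fake_lstsq(*self, *A);
  // One heap-allocated handle per tuple element; the caller frees them.
  out__[0] = new Tensor(std::get<0>(outputs__));
  out__[1] = new Tensor(std::get<1>(outputs__));
  return 0;
}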
int atg_lu_unpack(tensor *out__, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots) {
@@ -9815,9 +9791,9 @@ int atg_lu_unpack(tensor *out__, tensor LU_data, tensor LU_pivots, int unpack_da
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_lu_unpack_out(tensor *out__, tensor P, tensor L, tensor U, tensor LU_data, tensor LU_pivots, int unpack_data, int unpack_pivots) {
@@ -9826,180 +9802,180 @@ int atg_lu_unpack_out(tensor *out__, tensor P, tensor L, tensor U, tensor LU_dat
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) {
  PROTECT(
    auto outputs__ = torch::margin_ranking_loss(*input1, *input2, *target, margin, reduction);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) {
  PROTECT(
    auto outputs__ = torch::masked_fill(*self, *mask, *value);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) {
  PROTECT(
    auto outputs__ = self->masked_fill_(*mask, *value);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_fill_tensor(tensor *out__, tensor self, tensor mask, tensor value) {
  PROTECT(
    auto outputs__ = torch::masked_fill(*self, *mask, *value);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_fill_tensor_(tensor *out__, tensor self, tensor mask, tensor value) {
  PROTECT(
    auto outputs__ = self->masked_fill_(*mask, *value);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) {
  PROTECT(
    auto outputs__ = torch::masked_scatter(*self, *mask, *source);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) {
  PROTECT(
    auto outputs__ = self->masked_scatter_(*mask, *source);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_select(tensor *out__, tensor self, tensor mask) {
  PROTECT(
    auto outputs__ = torch::masked_select(*self, *mask);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_select_backward(tensor *out__, tensor grad, tensor input, tensor mask) {
  PROTECT(
    auto outputs__ = torch::masked_select_backward(*grad, *input, *mask);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) {
  PROTECT(
    auto outputs__ = torch::masked_select_out(*out, *self, *mask);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matmul(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::matmul(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::matmul_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_matrix_exp(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::matrix_exp(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matrix_exp_backward(tensor *out__, tensor self, tensor grad) {
  PROTECT(
    auto outputs__ = torch::matrix_exp_backward(*self, *grad);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matrix_power(tensor *out__, tensor self, int64_t n) {
  PROTECT(
    auto outputs__ = torch::matrix_power(*self, n);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matrix_power_out(tensor *out__, tensor out, tensor self, int64_t n) {
  PROTECT(
    auto outputs__ = torch::matrix_power_out(*out, *self, n);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matrix_rank(tensor *out__, tensor self, int symmetric) {
  PROTECT(
    auto outputs__ = torch::matrix_rank(*self, (bool)symmetric);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_matrix_rank_tol(tensor *out__, tensor self, double tol, int symmetric) {
  PROTECT(
    auto outputs__ = torch::matrix_rank(*self, tol, (bool)symmetric);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::max(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
@@ -10007,9 +9983,9 @@ int atg_max_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
    auto outputs__ = torch::max(*self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_dim_max(tensor *out__, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim) {
@@ -10017,36 +9993,36 @@ int atg_max_dim_max(tensor *out__, tensor max, tensor max_values, tensor self, i
    auto outputs__ = torch::max_out(*max, *max_values, *self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_other(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::max(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::max_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
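(List-valued parameters such as kernel_size, stride, padding and dilation in the pooling wrappers below cross the C boundary as a raw pointer plus an explicit length, which the generated code rewraps as torch::IntArrayRef, a non-owning view; the caller's buffer must stay alive for the duration of the call. A compilable sketch of the same marshalling with a homemade view type; IntView and product are illustrative stand-ins, not part of the API:)

#include <cstdint>

// Stand-in for torch::IntArrayRef: a non-owning (pointer, length) view.
struct IntView {
  const int64_t *data;
  int len;
};

static int64_t product(IntView xs) {   // hypothetical callee
  int64_t p = 1;
  for (int i = 0; i < xs.len; ++i) p *= xs.data[i];
  return p;
}

// Mirrors how atg_max_pool2d treats kernel_size_data / kernel_size_len.
int64_t atg_fake_pool(int64_t *kernel_size_data, int kernel_size_len) {
  return product(IntView{kernel_size_data, kernel_size_len});
}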
int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
@@ -10054,18 +10030,18 @@ int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size
    auto outputs__ = torch::max_pool1d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
@@ -10073,27 +10049,27 @@ int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size
    auto outputs__ = torch::max_pool2d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) {
  PROTECT(
    auto outputs__ = torch::max_pool2d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool2d_with_indices_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) {
  PROTECT(
    auto outputs__ = torch::max_pool2d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
@@ -10101,18 +10077,18 @@ int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, t
    auto outputs__ = torch::max_pool2d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
@@ -10120,27 +10096,27 @@ int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size
    auto outputs__ = torch::max_pool3d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) {
  PROTECT(
    auto outputs__ = torch::max_pool3d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_max_pool3d_with_indices_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) {
  PROTECT(
    auto outputs__ = torch::max_pool3d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
@@ -10148,135 +10124,135 @@ int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, t
    auto outputs__ = torch::max_pool3d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool2d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool2d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool2d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool2d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool3d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool3d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_max_unpool3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool3d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
  PROTECT(
    auto outputs__ = torch::max_unpool3d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_maximum(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::maximum(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_maximum_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::maximum_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mean(tensor *out__, tensor self, int dtype) {
  PROTECT(
    auto outputs__ = torch::mean(*self, torch::ScalarType(dtype));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
  PROTECT(
    auto outputs__ = torch::mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
  PROTECT(
    auto outputs__ = torch::mean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_median(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::median(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_median_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
@@ -10284,9 +10260,9 @@ int atg_median_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
    auto outputs__ = torch::median(*self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_median_dim_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) {
@@ -10294,46 +10270,42 @@ int atg_median_dim_values(tensor *out__, tensor values, tensor indices, tensor s
    auto outputs__ = torch::median_out(*values, *indices, *self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) {
  PROTECT(
    auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len));
    int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
    for (int i = 0; i < sz; ++i)
      out__[i] = new torch::Tensor(outputs__[i]);
    out__[sz] = nullptr;
-    // return out__;
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_meshgrid_indexing(tensor *out__, tensor *tensors_data, int tensors_len, char * indexing) {
  PROTECT(
    auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len), std::string(indexing));
    int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
    for (int i = 0; i < sz; ++i)
      out__[i] = new torch::Tensor(outputs__[i]);
    out__[sz] = nullptr;
-    // return out__;
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_min(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::min(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_min_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
@@ -10341,9 +10313,9 @@ int atg_min_dim(tensor *out__, tensor self, int64_t dim, int keepdim) {
    auto outputs__ = torch::min(*self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_min_dim_min(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) {
@@ -10351,45 +10323,45 @@ int atg_min_dim_min(tensor *out__, tensor min, tensor min_indices, tensor self,
    auto outputs__ = torch::min_out(*min, *min_indices, *self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_min_other(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::min(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::min_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_minimum(tensor *out__, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::minimum(*self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_minimum_out(tensor *out__, tensor out, tensor self, tensor other) {
  PROTECT(
    auto outputs__ = torch::minimum_out(*out, *self, *other);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) {
@@ -10398,9 +10370,9 @@ int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bia
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
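(meshgrid, handled just above, is one of the few ops whose output count depends on its inputs; instead of a fixed number of out__ slots, the wrapper writes outputs__.size() tensors followed by a nullptr sentinel, and the deleted malloc / return out__ comment lines are leftovers of an older scheme in which the wrapper allocated and returned that array itself. A sketch of the sentinel convention from the caller's side; atg_fake_meshgrid and consume are illustrative, not part of the API:)

#include <cstddef>

struct Tensor { int id; };   // stand-in for torch::Tensor
typedef Tensor *tensor;

// Hypothetical variable-output wrapper: fill out__, then nullptr-terminate.
static int atg_fake_meshgrid(tensor *out__, int n) {
  for (int i = 0; i < n; ++i) out__[i] = new Tensor{i};
  out__[n] = nullptr;   // sentinel the caller scans for
  return 0;
}

// Caller side: walk until the sentinel, taking ownership of each handle.
static int consume(tensor *outs) {
  int count = 0;
  for (tensor *p = outs; *p != nullptr; ++p) {
    delete *p;
    ++count;
  }
  return count;
}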
int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon) {
@@ -10409,99 +10381,99 @@ int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_outp
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_backward_bias(*grad_output);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
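(The miopen and mkldnn convolution wrappers around here all use the (bias ? *bias : torch::Tensor()) idiom: an optional tensor argument is a possibly-null handle, and a null handle is turned into a default-constructed, undefined torch::Tensor, which ATen treats as an absent bias. In miniature, with a stand-in tensor type so the snippet stands alone; FakeTensor and atg_fake_linear are illustrative only:)

struct FakeTensor {                 // stand-in for torch::Tensor
  bool is_defined;
  FakeTensor() : is_defined(false) {}          // undefined == "no bias"
  explicit FakeTensor(double) : is_defined(true) {}
};
typedef FakeTensor *tensor;

// Mirrors the generated pattern: null handle -> undefined tensor.
int atg_fake_linear(tensor bias) {
  FakeTensor b = (bias ? *bias : FakeTensor());
  return b.is_defined ? 1 : 0;      // callee can test definedness
}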
int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_depthwise_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_depthwise_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) {
  PROTECT(
    auto outputs__ = torch::miopen_depthwise_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) {
@@ -10512,81 +10484,81 @@ int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_
    out__[2] = new torch::Tensor(std::get<2>(outputs__));
    out__[3] = new torch::Tensor(std::get<3>(outputs__));
    out__[4] = new torch::Tensor(std::get<4>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mish(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::mish(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mish_(tensor *out__, tensor self) {
  PROTECT(
    auto outputs__ = torch::mish_(*self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mish_backward(tensor *out__, tensor grad_output, tensor self) {
  PROTECT(
    auto outputs__ = torch::mish_backward(*grad_output, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mish_out(tensor *out__, tensor out, tensor self) {
  PROTECT(
    auto outputs__ = torch::mish_out(*out, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) {
  PROTECT(
    auto outputs__ = torch::mkldnn_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_adaptive_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self) {
  PROTECT(
    auto outputs__ = torch::mkldnn_adaptive_avg_pool2d_backward(*grad_output, *self);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) {
  PROTECT(
    auto outputs__ = torch::mkldnn_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) {
  PROTECT(
    auto outputs__ = torch::mkldnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) {
@@ -10594,27 +10566,27 @@ int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_
    auto outputs__ = torch::mkldnn_convolution_backward_weights(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_mkldnn_linear(tensor *out__, tensor self, tensor weight, tensor bias) {
  PROTECT(
    auto outputs__ = torch::mkldnn_linear(*self, *weight, (bias ? *bias : torch::Tensor()));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_linear_backward_input(tensor *out__, int64_t *input_size_data, int input_size_len, tensor grad_output, tensor weight) {
  PROTECT(
    auto outputs__ = torch::mkldnn_linear_backward_input(torch::IntArrayRef(input_size_data, input_size_len), *grad_output, *weight);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_linear_backward_weights(tensor *out__, tensor grad_output, tensor input, tensor weight, int bias_defined) {
@@ -10622,81 +10594,81 @@ int atg_mkldnn_linear_backward_weights(tensor *out__, tensor grad_output, tensor
    auto outputs__ = torch::mkldnn_linear_backward_weights(*grad_output, *input, *weight, (bool)bias_defined);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::mkldnn_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_max_pool2d_backward(tensor *out__, tensor grad_output, tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::mkldnn_max_pool2d_backward(*grad_output, *output, *input, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::mkldnn_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_max_pool3d_backward(tensor *out__, tensor grad_output, tensor output, tensor input, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
  PROTECT(
    auto outputs__ = torch::mkldnn_max_pool3d_backward(*grad_output, *output, *input, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) {
  PROTECT(
    auto outputs__ = torch::mkldnn_reorder_conv2d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mkldnn_reorder_conv3d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) {
  PROTECT(
    auto outputs__ = torch::mkldnn_reorder_conv3d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mm(tensor *out__, tensor self, tensor mat2) {
  PROTECT(
    auto outputs__ = torch::mm(*self, *mat2);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) {
  PROTECT(
    auto outputs__ = torch::mm_out(*out, *self, *mat2);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) {
@@ -10704,9 +10676,9 @@ int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) {
    auto outputs__ = torch::mode(*self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_mode_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) {
@@ -10714,360 +10686,360 @@ int atg_mode_values(tensor *out__, tensor values, tensor indices, tensor self, i
    auto outputs__ = torch::mode_out(*values, *indices, *self, dim, (bool)keepdim);
    out__[0] = new torch::Tensor(std::get<0>(outputs__));
    out__[1] = new torch::Tensor(std::get<1>(outputs__));
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_moveaxis(tensor *out__, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len) {
  PROTECT(
    auto outputs__ = torch::moveaxis(*self, torch::IntArrayRef(source_data, source_len), torch::IntArrayRef(destination_data, destination_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_moveaxis_int(tensor *out__, tensor self, int64_t source, int64_t destination) {
  PROTECT(
    auto outputs__ = torch::moveaxis(*self, source, destination);
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}

int atg_movedim(tensor *out__, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len) {
  PROTECT(
    auto outputs__ = torch::movedim(*self, torch::IntArrayRef(source_data, source_len), torch::IntArrayRef(destination_data, destination_len));
    out__[0] = new torch::Tensor(outputs__);
- return 0;
-)
-return 1;
+ return 0;
+ )
+ return 1;
}
return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::mse_loss(*self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::mse_loss_backward(*grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mse_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::mse_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::mse_loss_out(*out, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_msort(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::msort(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_msort_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::msort_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mul(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::mul(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mul_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->mul_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::mul_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mul_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::mul(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mul_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->mul_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::multi_margin_loss_backward(*grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multi_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::multi_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, *p, *margin, (weight ? 
*weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::multilabel_margin_loss(*self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) { PROTECT( auto outputs__ = torch::multilabel_margin_loss_backward(*grad_output, *self, *target, reduction, *is_target); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multilabel_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) { PROTECT( auto outputs__ = torch::multilabel_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, *is_target); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::multilabel_margin_loss_out(*out, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replacement) { PROTECT( auto outputs__ = torch::multinomial(*self, num_samples, (bool)replacement); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samples, int replacement) { PROTECT( auto outputs__ = torch::multinomial_out(*out, *self, num_samples, (bool)replacement); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multiply(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::multiply(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multiply_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->multiply_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multiply_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::multiply_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multiply_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::multiply(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_multiply_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->multiply_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mv(tensor *out__, tensor self, tensor vec) { PROTECT( auto outputs__ = torch::mv(*self, *vec); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) { PROTECT( auto outputs__ = torch::mv_out(*out, *self, *vec); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + 
return 0; + ) + return 1; } int atg_mvlgamma(tensor *out__, tensor self, int64_t p) { PROTECT( auto outputs__ = torch::mvlgamma(*self, p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mvlgamma_(tensor *out__, tensor self, int64_t p) { PROTECT( auto outputs__ = self->mvlgamma_(p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_mvlgamma_out(tensor *out__, tensor out, tensor self, int64_t p) { PROTECT( auto outputs__ = torch::mvlgamma_out(*out, *self, p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nan_to_num(tensor *out__, tensor self, double nan, double posinf, double neginf) { PROTECT( auto outputs__ = torch::nan_to_num(*self, nan, posinf, neginf); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nan_to_num_(tensor *out__, tensor self, double nan, double posinf, double neginf) { PROTECT( auto outputs__ = torch::nan_to_num_(*self, nan, posinf, neginf); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nan_to_num_out(tensor *out__, tensor out, tensor self, double nan, double posinf, double neginf) { PROTECT( auto outputs__ = torch::nan_to_num_out(*out, *self, nan, posinf, neginf); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanmean(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::nanmean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanmean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::nanmean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanmedian(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::nanmedian(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanmedian_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { @@ -11075,9 +11047,9 @@ int atg_nanmedian_dim(tensor *out__, tensor self, int64_t dim, int keepdim) { auto outputs__ = torch::nanmedian(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanmedian_dim_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { @@ -11085,144 +11057,144 @@ int atg_nanmedian_dim_values(tensor *out__, tensor values, tensor indices, tenso auto outputs__ = torch::nanmedian_out(*values, *indices, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::nanquantile(*self, *q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_new(tensor *out__, tensor self, 
tensor q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::nanquantile(*self, *q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::nanquantile(*self, q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::nanquantile(*self, q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nanquantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::nanquantile_out(*out, *self, q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nansum(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::nansum(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nansum_dim_intlist(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::nansum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nansum_intlist_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::nansum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) { PROTECT( auto outputs__ = torch::narrow(*self, dim, start, length); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) { PROTECT( auto outputs__ = torch::narrow_copy(*self, dim, start, length); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } 
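/* Every hunk in this file makes the same mechanical change: the
   `return 0;` / `)` / `return 1;` tail of each generated wrapper is
   re-indented so the success path, the closing parenthesis of
   PROTECT(...), and the failure path align with the surrounding code.
   PROTECT itself is defined elsewhere in this patch series; the sketch
   below is only an assumption about its shape, inferred from how the
   generated wrappers use it (return 0 inside the guarded body, fall
   through to return 1 after a caught exception). The `last_error` /
   `set_last_error` names are hypothetical and are not part of this
   patch. */

#include <exception>
#include <string>

// Assumed per-thread error slot, queried by the binding layer after a
// wrapper returns 1. Not part of this patch; illustration only.
static thread_local std::string last_error;
static void set_last_error(const char *msg) { last_error = msg; }

// Variadic so a stray top-level comma in the guarded body cannot split
// the macro argument; the generated bodies above only contain commas
// inside parentheses, so a single-argument version would also work.
#define PROTECT(...)                    \
  try {                                 \
    __VA_ARGS__                         \
  } catch (const std::exception &e) {   \
    set_last_error(e.what());           \
  }

// A hypothetical wrapper in the same shape as the generated ones above:
// 0 signals success; 1 signals that PROTECT caught a C++ exception whose
// message is retrievable from the error slot. Returning a status code
// keeps C++ exceptions from unwinding across the C boundary into the
// host language, which is the design choice these hunks are formatting.
int atg_example_add_one(int *out__, int x) {
  PROTECT(
    *out__ = x + 1;
    return 0;
  )
  return 1;
}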
int atg_narrow_copy_out(tensor *out__, tensor out, tensor self, int64_t dim, int64_t start, int64_t length) { PROTECT( auto outputs__ = torch::narrow_copy_out(*out, *self, dim, start, length); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_narrow_tensor(tensor *out__, tensor self, int64_t dim, tensor start, int64_t length) { PROTECT( auto outputs__ = torch::narrow(*self, dim, *start, length); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) { @@ -11231,9 +11203,9 @@ int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bia out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_batch_norm_out(tensor *out__, tensor out, tensor save_mean, tensor save_invstd, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) { @@ -11242,9 +11214,9 @@ int atg_native_batch_norm_out(tensor *out__, tensor out, tensor save_mean, tenso out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_group_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps) { @@ -11253,9 +11225,9 @@ int atg_native_group_norm(tensor *out__, tensor input, tensor weight, tensor bia out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps) { @@ -11264,842 +11236,840 @@ int atg_native_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_norm(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::native_norm(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_native_norm_scalaropt_dim_dtype(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::native_norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ne(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->ne_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne_scalar_out(tensor *out__, 
tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ne_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ne(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->ne_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ne_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::ne_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_neg(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::neg(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_neg_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::neg_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_neg_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::neg_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_negative(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::negative(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_negative_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::negative_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_negative_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::negative_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_new_empty_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_new_ones(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_ones(torch::IntArrayRef(size_data, size_len), 
at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nextafter(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::nextafter(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nextafter_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->nextafter_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nextafter_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::nextafter_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss2d(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { PROTECT( auto outputs__ = torch::nll_loss2d_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { PROTECT( auto outputs__ = torch::nll_loss2d_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss2d_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { PROTECT( auto outputs__ = torch::nll_loss_backward(*grad_output, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index, *total_weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) { PROTECT( auto outputs__ = torch::nll_loss_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss_nd(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss_nd(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nonzero(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::nonzero(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nonzero_numpy(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::nonzero_numpy(*self); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nonzero_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::nonzero_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::norm(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm_dtype_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) { PROTECT( auto outputs__ = torch::norm_except_dim(*v, pow, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm_scalaropt_dim(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 
0; + ) + return 1; } int atg_norm_scalaropt_dim_dtype(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_norm_scalaropt_dtype(tensor *out__, tensor self, scalar p, int dtype) { PROTECT( auto outputs__ = torch::norm(*self, *p, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_normal(tensor *out__, tensor out, tensor mean, double std) { PROTECT( auto outputs__ = torch::normal_out(*out, *mean, std); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_normal_(tensor *out__, tensor self, double mean, double std) { PROTECT( auto outputs__ = self->normal_(mean, std); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_normal_float_float_out(tensor *out__, tensor out, double mean, double std, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::normal_out(*out, mean, std, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_normal_float_tensor_out(tensor *out__, tensor out, double mean, tensor std) { PROTECT( auto outputs__ = torch::normal_out(*out, mean, *std); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_normal_tensor_tensor_out(tensor *out__, tensor out, tensor mean, tensor std) { PROTECT( auto outputs__ = torch::normal_out(*out, *mean, *std); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::not_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->not_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal_scalar_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::not_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal_tensor(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::not_equal(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal_tensor_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->not_equal_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_not_equal_tensor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::not_equal_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) { PROTECT( auto outputs__ = torch::nuclear_norm(*self, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nuclear_norm_dim(tensor *out__, tensor self, 
int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::nuclear_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nuclear_norm_dim_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::nuclear_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) { PROTECT( auto outputs__ = torch::nuclear_norm_out(*out, *self, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_numpy_t(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->numpy_T(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) { PROTECT( auto outputs__ = torch::one_hot(*self, num_classes); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::ones(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ones_like(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::ones_like(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::ones_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_orgqr(tensor *out__, tensor self, tensor input2) { PROTECT( auto outputs__ = torch::orgqr(*self, *input2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) { PROTECT( auto outputs__ = torch::orgqr_out(*out, *self, *input2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left, int transpose) { PROTECT( auto outputs__ = torch::ormqr(*self, *input2, *input3, (bool)left, (bool)transpose); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose) { PROTECT( auto outputs__ = torch::ormqr_out(*out, *self, *input2, *input3, (bool)left, (bool)transpose); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_outer(tensor *out__, tensor self, tensor vec2) { PROTECT( auto outputs__ = torch::outer(*self, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_outer_out(tensor *out__, tensor out, tensor self, tensor vec2) { PROTECT( auto outputs__ = torch::outer_out(*out, *self, *vec2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + 
) + return 1; } int atg_pad_sequence(tensor *out__, tensor *sequences_data, int sequences_len, int batch_first, double padding_value) { PROTECT( auto outputs__ = torch::pad_sequence(of_carray_tensor(sequences_data, sequences_len), (bool)batch_first, padding_value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) { PROTECT( auto outputs__ = torch::pairwise_distance(*x1, *x2, p, eps, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pdist(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = torch::pdist(*self, p); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::permute(*self, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pin_memory(tensor *out__, tensor self, int device) { PROTECT( auto outputs__ = self->pin_memory(device_of_int(device)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pinverse(tensor *out__, tensor self, double rcond) { PROTECT( auto outputs__ = torch::pinverse(*self, rcond); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) { PROTECT( auto outputs__ = torch::pixel_shuffle(*self, upscale_factor); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pixel_unshuffle(tensor *out__, tensor self, int64_t downscale_factor) { PROTECT( auto outputs__ = torch::pixel_unshuffle(*self, downscale_factor); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_poisson(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::poisson(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction) { PROTECT( auto outputs__ = torch::poisson_nll_loss(*input, *target, (bool)log_input, (bool)full, eps, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_polar(tensor *out__, tensor abs, tensor angle) { PROTECT( auto outputs__ = torch::polar(*abs, *angle); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_polar_out(tensor *out__, tensor out, tensor abs, tensor angle) { PROTECT( auto outputs__ = torch::polar_out(*out, *abs, *angle); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_polygamma(tensor *out__, int64_t n, tensor self) { PROTECT( auto outputs__ = torch::polygamma(n, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_polygamma_(tensor *out__, tensor self, int64_t n) { PROTECT( auto outputs__ = self->polygamma_(n); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) { PROTECT( auto outputs__ = 
torch::polygamma_out(*out, n, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_positive(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::positive(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow(tensor *out__, tensor self, tensor exponent) { PROTECT( auto outputs__ = torch::pow(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_(tensor *out__, tensor self, scalar exponent) { PROTECT( auto outputs__ = self->pow_(*exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_scalar(tensor *out__, scalar self, tensor exponent) { PROTECT( auto outputs__ = torch::pow(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_scalar_out(tensor *out__, tensor out, scalar self, tensor exponent) { PROTECT( auto outputs__ = torch::pow_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_tensor_(tensor *out__, tensor self, tensor exponent) { PROTECT( auto outputs__ = self->pow_(*exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_tensor_scalar(tensor *out__, tensor self, scalar exponent) { PROTECT( auto outputs__ = torch::pow(*self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_tensor_scalar_out(tensor *out__, tensor out, tensor self, scalar exponent) { PROTECT( auto outputs__ = torch::pow_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_pow_tensor_tensor_out(tensor *out__, tensor out, tensor self, tensor exponent) { PROTECT( auto outputs__ = torch::pow_out(*out, *self, *exponent); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_prelu(tensor *out__, tensor self, tensor weight) { PROTECT( auto outputs__ = torch::prelu(*self, *weight); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor weight) { @@ -12107,72 +12077,72 @@ int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor we auto outputs__ = torch::prelu_backward(*grad_output, *self, *weight); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_prod(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::prod(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_prod_dim_int(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::prod(*self, dim, (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_prod_int_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) 
-return 1; + return 0; + ) + return 1; } int atg_put(tensor *out__, tensor self, tensor index, tensor source, int accumulate) { PROTECT( auto outputs__ = torch::put(*self, *index, *source, (bool)accumulate); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumulate) { PROTECT( auto outputs__ = self->put_(*index, *source, (bool)accumulate); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_q_per_channel_scales(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::q_per_channel_scales(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_q_per_channel_zero_points(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::q_per_channel_zero_points(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_qr(tensor *out__, tensor self, int some) { @@ -12180,9 +12150,9 @@ int atg_qr(tensor *out__, tensor self, int some) { auto outputs__ = torch::qr(*self, (bool)some); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_qr_q(tensor *out__, tensor Q, tensor R, tensor self, int some) { @@ -12190,140 +12160,138 @@ int atg_qr_q(tensor *out__, tensor Q, tensor R, tensor self, int some) { auto outputs__ = torch::qr_out(*Q, *R, *self, (bool)some); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::quantile(*self, *q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_new(tensor *out__, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::quantile(*self, *q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_new_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, *q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_new_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::quantile(*self, q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_new_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim, char * interpolation) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, q, dim, (bool)keepdim, std::string(interpolation)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_out(tensor *out__, tensor out, tensor self, tensor q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, *q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; 
+ ) + return 1; } int atg_quantile_scalar(tensor *out__, tensor self, double q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::quantile(*self, q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantile_scalar_out(tensor *out__, tensor out, tensor self, double q, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::quantile_out(*out, *self, q, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) { PROTECT( auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t zero_point, int dtype) { PROTECT( auto outputs__ = torch::quantize_per_tensor(*self, scale, zero_point, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantize_per_tensor_tensor_qparams(tensor *out__, tensor self, tensor scale, tensor zero_point, int dtype) { PROTECT( auto outputs__ = torch::quantize_per_tensor(*self, *scale, *zero_point, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantize_per_tensor_tensors(tensor *out__, tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype) { PROTECT( auto outputs__ = torch::quantize_per_tensor(of_carray_tensor(tensors_data, tensors_len), *scales, *zero_points, torch::ScalarType(dtype)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point) { PROTECT( auto outputs__ = torch::quantized_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? 
*bias : torch::Tensor()), *mean, *var, eps, output_scale, output_zero_point); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( auto outputs__ = torch::quantized_gru_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { @@ -12331,747 +12299,747 @@ int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx auto outputs__ = torch::quantized_lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::quantized_max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::quantized_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( auto outputs__ = torch::quantized_rnn_relu_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor 
col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
   PROTECT(
     auto outputs__ = torch::quantized_rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rad2deg(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::rad2deg(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rad2deg_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::rad2deg_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rad2deg_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::rad2deg_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::rand(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rand_like(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::rand_like(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::rand_out(*out, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::randint(high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint_like(tensor *out__, tensor self, int64_t high) {
   PROTECT(
     auto outputs__ = torch::randint_like(*self, high);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint_like_low_dtype(tensor *out__, tensor self, int64_t low, int64_t high) {
   PROTECT(
     auto outputs__ = torch::randint_like(*self, low, high);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint_low(tensor *out__, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::randint(low, high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint_low_out(tensor *out__, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::randint_out(*out, low, high, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::randint_out(*out, high, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::randn(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randn_like(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::randn_like(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::randn_out(*out, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_random_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = self->random_();
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_random_from_(tensor *out__, tensor self, int64_t from, int64_t to) {
   PROTECT(
     auto outputs__ = self->random_(from, to);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_random_to_(tensor *out__, tensor self, int64_t to) {
   PROTECT(
     auto outputs__ = self->random_(to);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::randperm(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_randperm_out(tensor *out__, tensor out, int64_t n) {
   PROTECT(
     auto outputs__ = torch::randperm_out(*out, n);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) {
   PROTECT(
     auto outputs__ = torch::range_out(*out, *start, *end);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_range_step(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_ravel(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::ravel(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_real(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::real(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reciprocal(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::reciprocal(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reciprocal_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::reciprocal_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reciprocal_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::reciprocal_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reflection_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::reflection_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_relu(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::relu(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_relu6(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::relu6(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_relu6_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::relu6_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_relu_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::relu_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::remainder(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = self->remainder_(*other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::remainder_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_scalar_tensor(tensor *out__, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::remainder(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_tensor(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::remainder(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_tensor_(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = self->remainder_(*other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_remainder_tensor_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::remainder_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
     auto outputs__ = torch::renorm(*self, *p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
     auto outputs__ = self->renorm_(*p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm) {
   PROTECT(
     auto outputs__ = torch::renorm_out(*out, *self, *p, dim, *maxnorm);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_len) {
   PROTECT(
     auto outputs__ = self->repeat(torch::IntArrayRef(repeats_data, repeats_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_repeat_interleave(tensor *out__, tensor repeats, int64_t output_size) {
   PROTECT(
     auto outputs__ = torch::repeat_interleave(*repeats, output_size);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_repeat_interleave_self_int(tensor *out__, tensor self, int64_t repeats, int64_t dim, int64_t output_size) {
   PROTECT(
     auto outputs__ = torch::repeat_interleave(*self, repeats, dim, output_size);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_repeat_interleave_self_tensor(tensor *out__, tensor self, tensor repeats, int64_t dim, int64_t output_size) {
   PROTECT(
     auto outputs__ = torch::repeat_interleave(*self, *repeats, dim, output_size);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
   PROTECT(
     auto outputs__ = torch::replication_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_requires_grad_(tensor *out__, tensor self, int requires_grad) {
   PROTECT(
     auto outputs__ = self->requires_grad_((bool)requires_grad);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
   PROTECT(
     auto outputs__ = torch::reshape(*self, torch::IntArrayRef(shape_data, shape_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_reshape_as(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = self->reshape_as(*other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = self->resize_(torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
   PROTECT(
     auto outputs__ = torch::resize_as_(*self, *the_template);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_resize_as_sparse_(tensor *out__, tensor self, tensor the_template) {
   PROTECT(
     auto outputs__ = torch::resize_as_sparse_(*self, *the_template);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_resolve_conj(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::resolve_conj(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_resolve_neg(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::resolve_neg(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
@@ -13079,18 +13047,18 @@ int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, in
     auto outputs__ = torch::rnn_relu(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
   PROTECT(
     auto outputs__ = torch::rnn_relu_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_relu_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
@@ -13098,9 +13066,9 @@ int atg_rnn_relu_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx,
     auto outputs__ = torch::rnn_relu(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
@@ -13108,18 +13076,18 @@ int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, in
     auto outputs__ = torch::rnn_tanh(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
   PROTECT(
     auto outputs__ = torch::rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rnn_tanh_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
@@ -13127,675 +13095,675 @@ int atg_rnn_tanh_data(tensor *out__, tensor data, tensor batch_sizes, tensor hx,
     auto outputs__ = torch::rnn_tanh(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len) {
   PROTECT(
     auto outputs__ = torch::roll(*self, torch::IntArrayRef(shifts_data, shifts_len), torch::IntArrayRef(dims_data, dims_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dims_len) {
   PROTECT(
     auto outputs__ = torch::rot90(*self, k, torch::IntArrayRef(dims_data, dims_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_round(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::round(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_round_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::round_(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_round_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::round_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_row_stack(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
     auto outputs__ = torch::row_stack(of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_row_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) {
   PROTECT(
     auto outputs__ = torch::row_stack_out(*out, of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rrelu(tensor *out__, tensor self, int training) {
   PROTECT(
     auto outputs__ = torch::rrelu(*self, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rrelu_(tensor *out__, tensor self, int training) {
   PROTECT(
     auto outputs__ = torch::rrelu_(*self, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training) {
   PROTECT(
     auto outputs__ = torch::rrelu_with_noise(*self, *noise, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training) {
   PROTECT(
     auto outputs__ = torch::rrelu_with_noise_(*self, *noise, (bool)training);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_rrelu_with_noise_backward(tensor *out__, tensor
grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training, int self_is_result) { PROTECT( auto outputs__ = torch::rrelu_with_noise_backward(*grad_output, *self, *noise, *lower, *upper, (bool)training, (bool)self_is_result); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor noise, int training) { PROTECT( auto outputs__ = torch::rrelu_with_noise_out(*out, *self, *noise, (bool)training); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rsqrt(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::rsqrt(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rsqrt_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::rsqrt_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rsqrt_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::rsqrt_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rsub(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::rsub(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_rsub_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::rsub(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::scalar_tensor(*s, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = torch::scatter(*self, dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = self->scatter_(dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = torch::scatter_add(*self, dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = self->scatter_add_(dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_add_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = torch::scatter_add_out(*out, *self, dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_reduce(tensor *out__, tensor self, int64_t dim, tensor index, tensor src, char * reduce) { PROTECT( auto outputs__ = torch::scatter(*self, dim, *index, *src, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_scatter_reduce_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src, char * reduce) { PROTECT( auto outputs__ = self->scatter_(dim, *index, *src, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_reduce_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src, char * reduce) { PROTECT( auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *src, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_src_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *src); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) { PROTECT( auto outputs__ = torch::scatter(*self, dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) { PROTECT( auto outputs__ = self->scatter_(dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, scalar value) { PROTECT( auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value_reduce(tensor *out__, tensor self, int64_t dim, tensor index, scalar value, char * reduce) { PROTECT( auto outputs__ = torch::scatter(*self, dim, *index, *value, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value_reduce_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value, char * reduce) { PROTECT( auto outputs__ = self->scatter_(dim, *index, *value, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_scatter_value_reduce_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, scalar value, char * reduce) { PROTECT( auto outputs__ = torch::scatter_out(*out, *self, dim, *index, *value, std::string(reduce)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_searchsorted(tensor *out__, tensor sorted_sequence, tensor self, int out_int32, int right) { PROTECT( auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_searchsorted_scalar(tensor *out__, tensor sorted_sequence, scalar self, int out_int32, int right) { PROTECT( auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_searchsorted_tensor_out(tensor *out__, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right) { PROTECT( auto outputs__ = torch::searchsorted_out(*out, *sorted_sequence, *self, (bool)out_int32, (bool)right); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_segment_reduce(tensor *out__, tensor data, char * reduce, tensor lengths, tensor indices, int64_t axis, int unsafe, scalar initial) { PROTECT( auto outputs__ = torch::segment_reduce(*data, std::string(reduce), (lengths ? *lengths : torch::Tensor()), (indices ? *indices : torch::Tensor()), axis, (bool)unsafe, *initial); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) { PROTECT( auto outputs__ = torch::select(*self, dim, index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_select_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index) { PROTECT( auto outputs__ = torch::select_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_selu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::selu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_selu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::selu_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_set_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->set_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_set_requires_grad(tensor *out__, tensor self, int r) { PROTECT( auto outputs__ = self->set_requires_grad((bool)r); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_set_source_tensor_(tensor *out__, tensor self, tensor source) { PROTECT( auto outputs__ = self->set_(*source); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sgn(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sgn(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sgn_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->sgn_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sgn_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sgn_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sigmoid(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sigmoid(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sigmoid_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sigmoid_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::sigmoid_backward(*grad_output, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sigmoid_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::sigmoid_backward_out(*grad_input, *grad_output, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sigmoid_out(tensor *out__, tensor 
out, tensor self) { PROTECT( auto outputs__ = torch::sigmoid_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sign(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sign(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sign_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->sign_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sign_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sign_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_signbit(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::signbit(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_signbit_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::signbit_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_silu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::silu(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_silu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::silu_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_silu_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::silu_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_silu_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::silu_backward_out(*grad_input, *grad_output, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_silu_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::silu_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sin(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sin(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sin_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sin_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sin_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sin_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinc(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sinc(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinc_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sinc_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinc_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sinc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sinh(*self); out__[0] = new 
torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sinh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sinh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sinh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step) { PROTECT( auto outputs__ = torch::slice(*self, dim, start, end, step); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slice_backward(tensor *out__, tensor grad_output, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step) { PROTECT( auto outputs__ = torch::slice_backward(*grad_output, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, start, end, step); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slogdet(tensor *out__, tensor self) { @@ -13803,243 +13771,243 @@ int atg_slogdet(tensor *out__, tensor self) { auto outputs__ = torch::slogdet(*self); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::slow_conv3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::slow_conv3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_dilated2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_dilated3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_transpose2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_transpose2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_transpose3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::slow_conv_transpose3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_smm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::smm(*self, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_smooth_l1_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::soft_margin_loss(*self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::soft_margin_loss_backward(*grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_soft_margin_loss_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::soft_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + 
return 1; } int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::soft_margin_loss_out(*out, *self, *target, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::softmax(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softplus(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::softplus(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) { PROTECT( auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softplus_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) { PROTECT( auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softplus_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::softplus_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softshrink(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::softshrink(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scalar lambd) { PROTECT( auto outputs__ = torch::softshrink_backward(*grad_output, *self, *lambd); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softshrink_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar lambd) { PROTECT( auto outputs__ = torch::softshrink_backward_out(*grad_input, *grad_output, *self, *lambd); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_softshrink_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::softshrink_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_solve(tensor *out__, tensor self, tensor A) { @@ -14047,9 +14015,9 @@ int atg_solve(tensor *out__, tensor self, tensor A) { auto outputs__ = torch::solve(*self, *A); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_solve_solution(tensor *out__, tensor solution, tensor lu, tensor self, tensor A) { @@ -14057,9 +14025,9 @@ int atg_solve_solution(tensor *out__, tensor solution, tensor lu, tensor self, t auto outputs__ = torch::solve_out(*solution, *lu, *self, *A); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) { @@ -14067,9 +14035,9 @@ int atg_sort(tensor *out__, tensor 
self, int64_t dim, int descending) { auto outputs__ = torch::sort(*self, dim, (bool)descending); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sort_stable(tensor *out__, tensor self, int stable, int64_t dim, int descending) { @@ -14077,9 +14045,9 @@ int atg_sort_stable(tensor *out__, tensor self, int stable, int64_t dim, int des auto outputs__ = torch::sort(*self, (bool)stable, dim, (bool)descending); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sort_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int descending) { @@ -14087,9 +14055,9 @@ int atg_sort_values(tensor *out__, tensor values, tensor indices, tensor self, i auto outputs__ = torch::sort_out(*values, *indices, *self, dim, (bool)descending); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sort_values_stable(tensor *out__, tensor values, tensor indices, tensor self, int stable, int64_t dim, int descending) { @@ -14097,910 +14065,906 @@ int atg_sort_values_stable(tensor *out__, tensor values, tensor indices, tensor auto outputs__ = torch::sort_out(*values, *indices, *self, (bool)stable, dim, (bool)descending); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::sparse_coo_tensor(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_coo_tensor_indices(tensor *out__, tensor indices, tensor values, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::sparse_coo_tensor(*indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_coo_tensor_indices_size(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::sparse_coo_tensor(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_csr_tensor(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::sparse_csr_tensor(*crow_indices, *col_indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_csr_tensor_crow_col_value_size(tensor *out__, tensor crow_indices, tensor col_indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::sparse_csr_tensor(*crow_indices, *col_indices, *values, 
torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_mask(tensor *out__, tensor self, tensor mask) { PROTECT( auto outputs__ = self->sparse_mask(*mask); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) { PROTECT( auto outputs__ = self->sparse_resize_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) { PROTECT( auto outputs__ = self->sparse_resize_and_clear_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_digamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_digamma(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_digamma_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_digamma_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_entr(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_entr(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_entr_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_entr_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_erf(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erf_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_erf_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfc(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_erfc(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfc_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_erfc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfcx(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_erfcx(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfcx_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_erfcx_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfinv(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_erfinv(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_erfinv_out(tensor *out__, tensor out, tensor self) 
{ PROTECT( auto outputs__ = torch::special_erfinv_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_exp2(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_exp2(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_exp2_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_exp2_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_expit(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_expit(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_expit_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_expit_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_expm1(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_expm1(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_expm1_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_expm1_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammainc(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::special_gammainc(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammainc_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::special_gammainc_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammaincc(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::special_gammaincc(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammaincc_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::special_gammaincc_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammaln(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_gammaln(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_gammaln_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_gammaln_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_i0(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_i0(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_i0_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::special_i0_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_i0e(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::special_i0e(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_special_i0e_out(tensor *out__, 
 int atg_special_i0e_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_i0e_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_i1(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_i1(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_i1_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_i1_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_i1e(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_i1e(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_i1e_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_i1e_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_log1p(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_log1p(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_log1p_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_log1p_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
   PROTECT(
     auto outputs__ = torch::special_log_softmax(*self, dim, torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_logit(tensor *out__, tensor self, double eps) {
   PROTECT(
     auto outputs__ = torch::special_logit(*self, eps);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_logit_out(tensor *out__, tensor out, tensor self, double eps) {
   PROTECT(
     auto outputs__ = torch::special_logit_out(*out, *self, eps);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
   PROTECT(
     auto outputs__ = torch::special_logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
   PROTECT(
     auto outputs__ = torch::special_logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_multigammaln(tensor *out__, tensor self, int64_t p) {
   PROTECT(
     auto outputs__ = torch::special_multigammaln(*self, p);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_multigammaln_out(tensor *out__, tensor out, tensor self, int64_t p) {
   PROTECT(
     auto outputs__ = torch::special_multigammaln_out(*out, *self, p);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_ndtr(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_ndtr(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_ndtr_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_ndtr_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_ndtri(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_ndtri(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_ndtri_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_ndtri_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_polygamma(tensor *out__, int64_t n, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_polygamma(n, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_polygamma_out(*out, n, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_psi(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_psi(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_psi_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_psi_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_round(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_round(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_round_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_round_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_sinc(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_sinc(*self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_sinc_out(tensor *out__, tensor out, tensor self) {
   PROTECT(
     auto outputs__ = torch::special_sinc_out(*out, *self);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlog1py_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlog1py_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_xlogy_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta_other_scalar(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta_other_scalar_out(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta_self_scalar(tensor *out__, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_zeta(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_special_zeta_self_scalar_out(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::special_zeta_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-  return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
   PROTECT(
     auto outputs__ = torch::split(*self, split_size, dim);
     int sz = outputs__.size();
-  // torch::Tensor **out__
= (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) { PROTECT( auto outputs__ = torch::split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sqrt(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sqrt(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sqrt_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sqrt_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sqrt_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::sqrt_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_square(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::square(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_square_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::square_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_square_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::square_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_squeeze(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::squeeze(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_squeeze_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->squeeze_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_squeeze_dim(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::squeeze(*self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_squeeze_dim_(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = self->squeeze_(dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::sspaddmm(*self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::sspaddmm_out(*out, *self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::stack(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_stack_out(tensor *out__, tensor out, tensor 
*tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std(tensor *out__, tensor self, int unbiased) { PROTECT( auto outputs__ = torch::std(*self, (bool)unbiased); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { PROTECT( auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_correction_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { PROTECT( auto outputs__ = torch::std_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_mean(tensor *out__, tensor self, int unbiased) { @@ -15008,9 +14972,9 @@ int atg_std_mean(tensor *out__, tensor self, int unbiased) { auto outputs__ = torch::std_mean(*self, (bool)unbiased); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) { @@ -15018,9 +14982,9 @@ int atg_std_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int d auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { @@ -15028,153 +14992,153 @@ int atg_std_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::std_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided, int return_complex) { PROTECT( auto outputs__ = torch::stft(*self, n_fft, hop_length, win_length, (window ? 
*window : torch::Tensor()), (bool)normalized, (bool)onesided, (bool)return_complex); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sub(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::sub(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sub_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->sub_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::sub_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sub_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::sub(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sub_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->sub_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_subtract(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::subtract(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_subtract_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->subtract_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_subtract_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::subtract_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_subtract_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::subtract(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_subtract_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->subtract_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sum(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::sum(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sum_dim_intlist(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sum_intlist_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = self->sum_to_size(torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_svd(tensor *out__, 
tensor self, int some, int compute_uv) { @@ -15183,9 +15147,9 @@ int atg_svd(tensor *out__, tensor self, int some, int compute_uv) { out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_svd_u(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv) { @@ -15194,45 +15158,45 @@ int atg_svd_u(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_swapaxes(tensor *out__, tensor self, int64_t axis0, int64_t axis1) { PROTECT( auto outputs__ = torch::swapaxes(*self, axis0, axis1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_swapaxes_(tensor *out__, tensor self, int64_t axis0, int64_t axis1) { PROTECT( auto outputs__ = self->swapaxes_(axis0, axis1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_swapdims(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = torch::swapdims(*self, dim0, dim1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_swapdims_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = self->swapdims_(dim0, dim1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) { @@ -15240,9 +15204,9 @@ int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) { auto outputs__ = torch::symeig(*self, (bool)eigenvectors, (bool)upper); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_symeig_e(tensor *out__, tensor e, tensor V, tensor self, int eigenvectors, int upper) { @@ -15250,348 +15214,342 @@ int atg_symeig_e(tensor *out__, tensor e, tensor V, tensor self, int eigenvector auto outputs__ = torch::symeig_out(*e, *V, *self, (bool)eigenvectors, (bool)upper); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_t(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::t(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_t_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->t_(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_take(tensor *out__, tensor self, tensor index) { PROTECT( auto outputs__ = torch::take(*self, *index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_take_along_dim(tensor *out__, tensor self, tensor indices, int64_t dim) { PROTECT( auto outputs__ = torch::take_along_dim(*self, *indices, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_take_along_dim_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t dim) { PROTECT( auto outputs__ = torch::take_along_dim_out(*out, *self, 
*indices, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) { PROTECT( auto outputs__ = torch::take_out(*out, *self, *index); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tan(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tan_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tan_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tan_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::tan_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tanh(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tanh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tanh_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::tanh_backward(*grad_output, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tanh_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::tanh_backward_out(*grad_input, *grad_output, *output); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tanh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::tanh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tensor_split(tensor *out__, tensor self, int64_t sections, int64_t dim) { PROTECT( auto outputs__ = torch::tensor_split(*self, sections, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tensor_split_indices(tensor *out__, tensor self, int64_t *indices_data, int indices_len, int64_t dim) { PROTECT( auto outputs__ = torch::tensor_split(*self, torch::IntArrayRef(indices_data, indices_len), dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tensor_split_tensor_indices_or_sections(tensor *out__, tensor self, tensor tensor_indices_or_sections, int64_t dim) { PROTECT( auto outputs__ = torch::tensor_split(*self, *tensor_indices_or_sections, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t 
*dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) { PROTECT( auto outputs__ = torch::tensordot(*self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tensordot_out(tensor *out__, tensor out, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) { PROTECT( auto outputs__ = torch::tensordot_out(*out, *self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold(*self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold_(*self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scalar threshold) { PROTECT( auto outputs__ = torch::threshold_backward(*grad_output, *self, *threshold); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_threshold_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar threshold) { PROTECT( auto outputs__ = torch::threshold_backward_out(*grad_input, *grad_output, *self, *threshold); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold_out(*out, *self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tile(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::tile(*self, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to(tensor *out__, tensor self, int device) { PROTECT( auto outputs__ = self->to(device_of_int(device)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_dense(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = self->to_dense(torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) { PROTECT( auto outputs__ = torch::to_dense_backward(*grad, *input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_device(tensor *out__, tensor self, int device, int dtype, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(device_of_int(device), torch::ScalarType(dtype), (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_dtype(tensor *out__, tensor self, int dtype, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(torch::ScalarType(dtype), 
(bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_dtype_layout(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_mkldnn(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = self->to_mkldnn(torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) { PROTECT( auto outputs__ = torch::to_mkldnn_backward(*grad, *input); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_other(tensor *out__, tensor self, tensor other, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(*other, (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_sparse(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->to_sparse(); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_to_sparse_sparse_dim(tensor *out__, tensor self, int64_t sparse_dim) { PROTECT( auto outputs__ = self->to_sparse(sparse_dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, int sorted) { @@ -15599,9 +15557,9 @@ int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, in auto outputs__ = torch::topk(*self, k, dim, (bool)largest, (bool)sorted); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_topk_values(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted) { @@ -15609,90 +15567,90 @@ int atg_topk_values(tensor *out__, tensor values, tensor indices, tensor self, i auto outputs__ = torch::topk_out(*values, *indices, *self, k, dim, (bool)largest, (bool)sorted); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_totype(tensor *out__, tensor self, int scalar_type) { PROTECT( auto outputs__ = self->toType(torch::ScalarType(scalar_type)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trace(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trace(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trace_backward(tensor *out__, tensor grad, int64_t *sizes_data, int sizes_len) { PROTECT( auto outputs__ = torch::trace_backward(*grad, torch::IntArrayRef(sizes_data, sizes_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = torch::transpose(*self, dim0, dim1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_transpose_(tensor *out__, tensor 
self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = self->transpose_(dim0, dim1); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trapezoid(tensor *out__, tensor y, int64_t dim) { PROTECT( auto outputs__ = torch::trapezoid(*y, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trapezoid_x(tensor *out__, tensor y, tensor x, int64_t dim) { PROTECT( auto outputs__ = torch::trapezoid(*y, *x, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) { PROTECT( auto outputs__ = torch::trapz(*y, *x, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trapz_dx(tensor *out__, tensor y, double dx, int64_t dim) { PROTECT( auto outputs__ = torch::trapz(*y, dx, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) { @@ -15700,9 +15658,9 @@ int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int tr auto outputs__ = torch::triangular_solve(*self, *A, (bool)upper, (bool)transpose, (bool)unitriangular); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triangular_solve_x(tensor *out__, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular) { @@ -15710,235 +15668,231 @@ int atg_triangular_solve_x(tensor *out__, tensor X, tensor M, tensor self, tenso auto outputs__ = torch::triangular_solve_out(*X, *M, *self, *A, (bool)upper, (bool)transpose, (bool)unitriangular); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tril(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::tril(*self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tril_(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = self->tril_(diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::tril_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::tril_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction) { PROTECT( auto outputs__ = torch::triplet_margin_loss(*anchor, *positive, *negative, margin, p, eps, (bool)swap, reduction); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triu(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = 
torch::triu(*self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triu_(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = self->triu_(diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::triu_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::triu_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_true_divide(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::true_divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_true_divide_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->true_divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_true_divide_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::true_divide_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_true_divide_scalar(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::true_divide(*self, *other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_true_divide_scalar_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->true_divide_(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trunc(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trunc(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trunc_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trunc_(*self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_trunc_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::trunc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_type_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->type_as(*other); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unbind(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::unbind(*self, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unflatten(tensor *out__, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len) { PROTECT( auto outputs__ = self->unflatten(dim, torch::IntArrayRef(sizes_data, sizes_len)); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_unflatten_dense_tensors(tensor *out__, tensor flat, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::unflatten_dense_tensors(*flat, of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) { PROTECT( auto outputs__ = self->unfold(dimension, size, step); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unfold_backward(tensor *out__, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step) { PROTECT( auto outputs__ = torch::unfold_backward(*grad_in, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, size, step); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_uniform_(tensor *out__, tensor self, double from, double to) { PROTECT( auto outputs__ = self->uniform_(from, to); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) { @@ -15947,9 +15901,9 @@ int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int r out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts) { @@ -15958,9 +15912,9 @@ int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int retu out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int return_inverse, int return_counts) { @@ -15969,384 +15923,378 @@ int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int retu out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unsafe_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { PROTECT( auto outputs__ = torch::unsafe_chunk(*self, chunks, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unsafe_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) { PROTECT( auto outputs__ = torch::unsafe_split(*self, split_size, dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int 
atg_unsafe_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) { PROTECT( auto outputs__ = torch::unsafe_split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim); int sz = outputs__.size(); - // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); for (int i = 0; i < sz; ++i) out__[i] = new torch::Tensor(outputs__[i]); out__[sz] = nullptr; - // return out__; - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::unsqueeze(*self, dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = self->unsqueeze_(dim); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bicubic2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double 
scales_w) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bilinear2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) { PROTECT( auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) { PROTECT( auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_linear1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) { PROTECT( auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) { PROTECT( auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } int atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales) { PROTECT( auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales); out__[0] = new torch::Tensor(outputs__); - return 0; -) -return 1; + return 0; + ) + return 1; } 
 int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest1d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest2d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_nearest3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_trilinear3d_backward_grad_input(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) {
   PROTECT(
     auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_value_selecting_reduction_backward(tensor *out__, tensor grad, int64_t dim, tensor indices, int64_t *sizes_data, int sizes_len, int keepdim) {
   PROTECT(
     auto outputs__ = torch::value_selecting_reduction_backward(*grad, dim, *indices, torch::IntArrayRef(sizes_data, sizes_len), (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_values(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = self->values();
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vander(tensor *out__, tensor x, int64_t n, int increasing) {
   PROTECT(
     auto outputs__ = torch::vander(*x, n, (bool)increasing);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var(tensor *out__, tensor self, int unbiased) {
   PROTECT(
     auto outputs__ = torch::var(*self, (bool)unbiased);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
   PROTECT(
     auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_correction_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
   PROTECT(
     auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
   PROTECT(
     auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_mean(tensor *out__, tensor self, int unbiased) {
@@ -16354,9 +16302,9 @@ int atg_var_mean(tensor *out__, tensor self, int unbiased) {
     auto outputs__ = torch::var_mean(*self, (bool)unbiased);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int64_t correction, int keepdim) {
@@ -16364,9 +16312,9 @@ int atg_var_mean_correction(tensor *out__, tensor self, int64_t *dim_data, int d
     auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), correction, (bool)keepdim);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
@@ -16374,284 +16322,278 @@ int atg_var_mean_dim(tensor *out__, tensor self, int64_t *dim_data, int dim_len,
     auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
     out__[0] = new torch::Tensor(std::get<0>(outputs__));
     out__[1] = new torch::Tensor(std::get<1>(outputs__));
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) {
   PROTECT(
     auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vdot(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::vdot(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vdot_out(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::vdot_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = self->view(torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_view_as(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = self->view_as(*other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_view_as_complex(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::view_as_complex(*self);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_view_as_real(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::view_as_real(*self);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_view_dtype(tensor *out__, tensor self, int dtype) {
   PROTECT(
     auto outputs__ = self->view(torch::ScalarType(dtype));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vsplit(tensor *out__, tensor self, int64_t sections) {
   PROTECT(
     auto outputs__ = torch::vsplit(*self, sections);
     int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
     for (int i = 0; i < sz; ++i)
       out__[i] = new torch::Tensor(outputs__[i]);
     out__[sz] = nullptr;
-    // return out__;
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vsplit_array(tensor *out__, tensor self, int64_t *indices_data, int indices_len) {
   PROTECT(
     auto outputs__ = torch::vsplit(*self, torch::IntArrayRef(indices_data, indices_len));
     int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
    for (int i = 0; i < sz; ++i)
       out__[i] = new torch::Tensor(outputs__[i]);
     out__[sz] = nullptr;
-    // return out__;
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vstack(tensor *out__, tensor *tensors_data, int tensors_len) {
   PROTECT(
     auto outputs__ = torch::vstack(of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_vstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) {
   PROTECT(
     auto outputs__ = torch::vstack_out(*out, of_carray_tensor(tensors_data, tensors_len));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_where(tensor *out__, tensor condition) {
   PROTECT(
     auto outputs__ = torch::where(*condition);
     int sz = outputs__.size();
-    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*));
     for (int i = 0; i < sz; ++i)
       out__[i] = new torch::Tensor(outputs__[i]);
     out__[sz] = nullptr;
-    // return out__;
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_where_scalar(tensor *out__, tensor condition, scalar self, scalar other) {
   PROTECT(
     auto outputs__ = torch::where(*condition, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_where_scalarother(tensor *out__, tensor condition, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::where(*condition, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_where_scalarself(tensor *out__, tensor condition, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::where(*condition, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_where_self(tensor *out__, tensor condition, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::where(*condition, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_(tensor *out__, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::xlogy_(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_outscalar_other(tensor *out__, tensor out, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_outscalar_self(tensor *out__, tensor out, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_outtensor(tensor *out__, tensor out, tensor self, tensor other) {
   PROTECT(
     auto outputs__ = torch::xlogy_out(*out, *self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_scalar_other(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_scalar_other_(tensor *out__, tensor self, scalar other) {
   PROTECT(
     auto outputs__ = torch::xlogy_(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_xlogy_scalar_self(tensor *out__, scalar self, tensor other) {
   PROTECT(
     auto outputs__ = torch::xlogy(*self, *other);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_zero_(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::zero_(*self);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
   PROTECT(
     auto outputs__ = torch::zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_zeros_like(tensor *out__, tensor self) {
   PROTECT(
     auto outputs__ = torch::zeros_like(*self);
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
 
 int atg_zeros_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
   PROTECT(
     auto outputs__ = torch::zeros_out(*out, torch::IntArrayRef(size_data, size_len));
     out__[0] = new torch::Tensor(outputs__);
-    return 0;
-)
-return 1;
+    return 0;
+  )
+  return 1;
 }
diff --git a/deps/c_wrapper/torch_api_generated.h b/deps/c_wrapper/torch_api_generated.h
index 63657b70..0c3b01cf 100644
--- a/deps/c_wrapper/torch_api_generated.h
+++ b/deps/c_wrapper/torch_api_generated.h
@@ -185,7 +185,6 @@ int atg__thnn_fused_gru_cell_backward(tensor *, tensor grad_hy, tensor workspace
 int atg__thnn_fused_lstm_cell(tensor *, tensor input_gates, tensor hidden_gates, tensor cx, tensor input_bias, tensor hidden_bias);
 int atg__thnn_fused_lstm_cell_backward(tensor *, tensor grad_hy, tensor grad_cy, tensor cx, tensor cy, tensor workspace, int has_bias);
 int atg__to_copy(tensor *, tensor self, int options_kind, int options_device, int non_blocking);
-// tensor *atg__to_cpu(tensor *tensors_data, int tensors_len);
 int atg__to_cpu(tensor *, tensor *tensors_data, int tensors_len);
 int atg__trilinear(tensor *, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim);
 int atg__unique(tensor *, tensor self, int sorted, int return_inverse);
@@ -251,7 +250,6 @@ int atg_affine_grid_generator(tensor *, tensor theta, int64_t *size_data, int si
 int atg_affine_grid_generator_backward(tensor *, tensor grad, int64_t *size_data, int size_len, int align_corners);
 int atg_alias(tensor *, tensor self);
 int atg_align_as(tensor *, tensor self, tensor other);
-// tensor *atg_align_tensors(tensor *tensors_data, int tensors_len);
 int atg_align_tensors(tensor *, tensor *tensors_data, int tensors_len);
 int atg_all(tensor *, tensor self);
 int atg_all_all_out(tensor *, tensor out, tensor self);
@@ -317,13 +315,10 @@ int atg_atanh(tensor *, tensor self);
 int atg_atanh_(tensor *, tensor self);
 int atg_atanh_out(tensor *, tensor out, tensor self);
 int atg_atleast_1d(tensor *, tensor self);
-// tensor *atg_atleast_1d_sequence(tensor *tensors_data, int tensors_len);
 int atg_atleast_1d_sequence(tensor *, tensor *tensors_data, int tensors_len);
 int atg_atleast_2d(tensor *, tensor self);
-// tensor *atg_atleast_2d_sequence(tensor *tensors_data, int tensors_len);
 int atg_atleast_2d_sequence(tensor *, tensor *tensors_data, int tensors_len);
 int atg_atleast_3d(tensor *, tensor self);
-// tensor *atg_atleast_3d_sequence(tensor *tensors_data, int tensors_len);
 int atg_atleast_3d_sequence(tensor *, tensor *tensors_data, int tensors_len);
 int atg_avg_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad);
 int atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override);
@@ -402,7 +397,6 @@ int atg_blackman_window_periodic(tensor *, int64_t window_length, int periodic,
 int atg_block_diag(tensor *, tensor *tensors_data, int tensors_len);
 int atg_bmm(tensor *, tensor self, tensor mat2);
 int atg_bmm_out(tensor *, tensor out, tensor self, tensor mat2);
-// tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len);
 int atg_broadcast_tensors(tensor *, tensor *tensors_data, int tensors_len);
 int atg_broadcast_to(tensor *, tensor self, int64_t *size_data, int size_len);
 int atg_bucketize(tensor *, tensor self, tensor boundaries, int out_int32, int right);
@@ -428,7 +422,6 @@ int atg_cholesky_out(tensor *, tensor out, tensor self, int upper);
 int atg_cholesky_solve(tensor *, tensor self, tensor input2, int upper);
 int atg_cholesky_solve_out(tensor *, tensor out, tensor self, tensor input2, int upper);
 int atg_choose_qparams_optimized(tensor *, tensor input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
-// tensor *atg_chunk(tensor self, int64_t chunks, int64_t dim);
 int atg_chunk(tensor *, tensor self, int64_t chunks, int64_t dim);
 int atg_clamp(tensor *, tensor self, scalar min, scalar max);
 int atg_clamp_(tensor *, tensor self, scalar min, scalar max);
@@ -549,7 +542,6 @@ int atg_deg2rad(tensor *, tensor self);
 int atg_deg2rad_(tensor *, tensor self);
 int atg_deg2rad_out(tensor *, tensor out, tensor self);
 int atg_dequantize(tensor *, tensor self);
-// tensor *atg_dequantize_tensors(tensor *tensors_data, int tensors_len);
 int atg_dequantize_tensors(tensor *, tensor *tensors_data, int tensors_len);
 int atg_det(tensor *, tensor self);
 int atg_detach(tensor *, tensor self);
@@ -591,9 +583,7 @@ int atg_dot(tensor *, tensor self, tensor tensor);
 int atg_dot_out(tensor *, tensor out, tensor self, tensor tensor);
 int atg_dropout(tensor *, tensor input, double p, int train);
 int atg_dropout_(tensor *, tensor self, double p, int train);
-// tensor *atg_dsplit(tensor self, int64_t sections);
 int atg_dsplit(tensor *, tensor self, int64_t sections);
-// tensor *atg_dsplit_array(tensor self, int64_t *indices_data, int indices_len);
 int atg_dsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
 int atg_dstack(tensor *, tensor *tensors_data, int tensors_len);
 int atg_dstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
@@ -841,9 +831,7 @@ int atg_heaviside_out(tensor *, tensor out, tensor self, tensor values);
 int atg_hinge_embedding_loss(tensor *, tensor self, tensor target, double margin, int64_t reduction);
 int atg_histc(tensor *, tensor self, int64_t bins);
 int atg_histc_out(tensor *, tensor out, tensor self, int64_t bins);
-// tensor *atg_hsplit(tensor self, int64_t sections);
 int atg_hsplit(tensor *, tensor self, int64_t sections);
-// tensor *atg_hsplit_array(tensor self, int64_t *indices_data, int indices_len);
 int atg_hsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
 int atg_hspmm(tensor *, tensor mat1, tensor mat2);
 int atg_hspmm_out(tensor *, tensor out, tensor mat1, tensor mat2);
@@ -1132,9 +1120,7 @@ int atg_mean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_l
 int atg_median(tensor *, tensor self);
 int atg_median_dim(tensor *, tensor self, int64_t dim, int keepdim);
 int atg_median_dim_values(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim);
-// tensor *atg_meshgrid(tensor *tensors_data, int tensors_len);
 int atg_meshgrid(tensor *, tensor *tensors_data, int tensors_len);
-// tensor *atg_meshgrid_indexing(tensor *tensors_data, int tensors_len, char * indexing);
 int atg_meshgrid_indexing(tensor *, tensor *tensors_data, int tensors_len, char * indexing);
 int atg_min(tensor *, tensor self);
 int atg_min_dim(tensor *, tensor self, int64_t dim, int keepdim);
@@ -1270,7 +1256,6 @@ int atg_nll_loss_backward_grad_input(tensor *, tensor grad_input, tensor grad_ou
 int atg_nll_loss_nd(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
 int atg_nll_loss_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
 int atg_nonzero(tensor *, tensor self);
-// tensor *atg_nonzero_numpy(tensor self);
 int atg_nonzero_numpy(tensor *, tensor self);
 int atg_nonzero_out(tensor *, tensor out, tensor self);
 int atg_norm(tensor *, tensor self);
@@ -1352,7 +1337,6 @@ int atg_quantile_scalar_out(tensor *, tensor out, tensor self, double q, int64_t
 int atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype);
 int atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype);
 int atg_quantize_per_tensor_tensor_qparams(tensor *, tensor self, tensor scale, tensor zero_point, int dtype);
-// tensor *atg_quantize_per_tensor_tensors(tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype);
 int atg_quantize_per_tensor_tensors(tensor *, tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype);
 int atg_quantized_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point);
 int atg_quantized_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
@@ -1631,9 +1615,7 @@ int atg_special_zeta_other_scalar_out(tensor *, tensor out, tensor self, scalar
 int atg_special_zeta_out(tensor *, tensor out, tensor self, tensor other);
 int atg_special_zeta_self_scalar(tensor *, scalar self, tensor other);
 int atg_special_zeta_self_scalar_out(tensor *, tensor out, scalar self, tensor other);
-// tensor *atg_split(tensor self, int64_t split_size, int64_t dim);
 int atg_split(tensor *, tensor self, int64_t split_size, int64_t dim);
-// tensor *atg_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
 int atg_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
 int atg_sqrt(tensor *, tensor self);
 int atg_sqrt_(tensor *, tensor self);
@@ -1694,11 +1676,8 @@ int atg_tanh_(tensor *, tensor self);
 int atg_tanh_backward(tensor *, tensor grad_output, tensor output);
 int atg_tanh_backward_grad_input(tensor *, tensor grad_input, tensor grad_output, tensor output);
 int atg_tanh_out(tensor *, tensor out, tensor self);
-// tensor *atg_tensor_split(tensor self, int64_t sections, int64_t dim);
 int atg_tensor_split(tensor *, tensor self, int64_t sections, int64_t dim);
-// tensor *atg_tensor_split_indices(tensor self, int64_t *indices_data, int indices_len, int64_t dim);
 int atg_tensor_split_indices(tensor *, tensor self, int64_t *indices_data, int indices_len, int64_t dim);
-// tensor *atg_tensor_split_tensor_indices_or_sections(tensor self, tensor tensor_indices_or_sections, int64_t dim);
 int atg_tensor_split_tensor_indices_or_sections(tensor *, tensor self, tensor tensor_indices_or_sections, int64_t dim);
 int atg_tensordot(tensor *, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
 int atg_tensordot_out(tensor *, tensor out, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
@@ -1750,10 +1729,8 @@ int atg_trunc(tensor *, tensor self);
 int atg_trunc_(tensor *, tensor self);
 int atg_trunc_out(tensor *, tensor out, tensor self);
 int atg_type_as(tensor *, tensor self, tensor other);
-// tensor *atg_unbind(tensor self, int64_t dim);
 int atg_unbind(tensor *, tensor self, int64_t dim);
 int atg_unflatten(tensor *, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len);
-// tensor *atg_unflatten_dense_tensors(tensor flat, tensor *tensors_data, int tensors_len);
 int atg_unflatten_dense_tensors(tensor *, tensor flat, tensor *tensors_data, int tensors_len);
 int atg_unfold(tensor *, tensor self, int64_t dimension, int64_t size, int64_t step);
 int atg_unfold_backward(tensor *, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step);
@@ -1761,11 +1738,8 @@ int atg_uniform_(tensor *, tensor self, double from, double to);
 int atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim);
 int atg_unique_dim(tensor *, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts);
 int atg_unique_dim_consecutive(tensor *, tensor self, int64_t dim, int return_inverse, int return_counts);
-// tensor *atg_unsafe_chunk(tensor self, int64_t chunks, int64_t dim);
 int atg_unsafe_chunk(tensor *, tensor self, int64_t chunks, int64_t dim);
-// tensor *atg_unsafe_split(tensor self, int64_t split_size, int64_t dim);
 int atg_unsafe_split(tensor *, tensor self, int64_t split_size, int64_t dim);
-// tensor *atg_unsafe_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
 int atg_unsafe_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
 int atg_unsqueeze(tensor *, tensor self, int64_t dim);
 int atg_unsqueeze_(tensor *, tensor self, int64_t dim);
@@ -1815,13 +1789,10 @@ int atg_view_as(tensor *, tensor self, tensor other);
 int atg_view_as_complex(tensor *, tensor self);
 int atg_view_as_real(tensor *, tensor self);
 int atg_view_dtype(tensor *, tensor self, int dtype);
-// tensor *atg_vsplit(tensor self, int64_t sections);
 int atg_vsplit(tensor *, tensor self, int64_t sections);
-// tensor *atg_vsplit_array(tensor self, int64_t *indices_data, int indices_len);
 int atg_vsplit_array(tensor *, tensor self, int64_t *indices_data, int indices_len);
 int atg_vstack(tensor *, tensor *tensors_data, int tensors_len);
 int atg_vstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
-// tensor *atg_where(tensor condition);
 int atg_where(tensor *, tensor condition);
 int atg_where_scalar(tensor *, tensor condition, scalar self, scalar other);
 int atg_where_scalarother(tensor *, tensor condition, tensor self, scalar other);
diff --git a/deps/c_wrapper_generator/bin/main.ml b/deps/c_wrapper_generator/bin/main.ml
index 495182a1..79a502b2 100644
--- a/deps/c_wrapper_generator/bin/main.ml
+++ b/deps/c_wrapper_generator/bin/main.ml
@@ -296,19 +296,14 @@ let write_cpp funcs filename =
           pc "    auto outputs__ = %s;" (Func.c_call func);
           (* the returned type is a C++ vector of tensors *)
           pc "    int sz = outputs__.size();";
-          pc
-            "    // torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * \
-             sizeof(torch::Tensor*));";
           pc "    for (int i = 0; i < sz; ++i)";
           pc "      out__[i] = new torch::Tensor(outputs__[i]);";
           pc "    out__[sz] = nullptr;";
-          pc "    // return out__;";
-          pc "    return 0;";
-          pc ")";
-          pc "return 1;";
+          pc "    return 0;";
+          pc "  )";
+          pc "  return 1;";
           pc "}";
           pc "";
-          ph "// tensor *atg_%s(%s);" exported_name c_typed_args_list;
          ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list
         | `fixed ntensors ->
           pc "int atg_%s(tensor *out__, %s) {" exported_name c_typed_args_list;
@@ -320,9 +315,9 @@ let write_cpp funcs filename =
           for i = 0 to ntensors - 1 do
             pc "    out__[%d] = new torch::Tensor(std::get<%d>(outputs__));" i i
           done;
-          pc "    return 0;";
-          pc ")";
-          pc "return 1;";
+          pc "    return 0;";
+          pc "  )";
+          pc "  return 1;";
           pc "}";
           pc "";
           ph "int atg_%s(tensor *, %s);" exported_name c_typed_args_list)))
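
Note (illustrative, not part of the patch): the generated wrappers above all follow one calling convention -- the caller passes a pre-allocated `tensor *out__` slot, the function returns 0 on success, and the `return 1;` after the PROTECT block is reached only when a C++ exception was caught inside PROTECT. A minimal C sketch of a call site, assuming `tensor` comes from torch_api.h and that the `options_kind`/`options_device` integer encodings match those used by `device_of_int` and `at::ScalarType` in the generated code (the values 6 and -1 below are assumptions, not taken from the patch):

    #include "torch_api.h"
    #include "torch_api_generated.h"

    int make_zeros(void) {
      tensor out__[1];
      int64_t size[] = {2, 3};
      /* assumed encodings: 6 = float kind, -1 = CPU device */
      if (atg_zeros(out__, size, 2, 6, -1) != 0)
        return 1; /* the C++ side threw; PROTECT turned it into a status code */
      /* out__[0] now points to a heap-allocated torch::Tensor owned by the caller */
      return 0;
    }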